diff: string (lengths 139 to 3.65k)
message: string (lengths 8 to 627)
diff_languages: string (1 class)
diff --git a/stanza/utils/training/run_pos.py b/stanza/utils/training/run_pos.py index <HASH>..<HASH> 100644 --- a/stanza/utils/training/run_pos.py +++ b/stanza/utils/training/run_pos.py @@ -49,6 +49,11 @@ def run_treebank(mode, paths, treebank, short_name, "--shorthand", short_name, "--mode", "train"] + if short_language in ("cop", "orv", "pcm", "qtd", "swl"): + # we couldn't find word vectors for these languages: + # coptic, naija, old russian, turkish german, swedish sign language + train_args.append("--no_pretrain") + train_args = train_args + extra_args logger.info("Running train POS for {} with args {}".format(treebank, train_args)) tagger.main(train_args)
A few languages don't have pretrains for POS
py
diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index <HASH>..<HASH> 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -359,7 +359,7 @@ def main(): raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) if data_args.max_eval_samples is not None: - raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_train_samples)) + raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) def prepare_dataset(batch): # process audio
Update run_speech_recognition_seq2seq.py (#<I>)
py
diff --git a/trimesh/viewer/windowed.py b/trimesh/viewer/windowed.py index <HASH>..<HASH> 100644 --- a/trimesh/viewer/windowed.py +++ b/trimesh/viewer/windowed.py @@ -154,7 +154,7 @@ class SceneViewer(pyglet.window.Window): if self.callback is not None: self.callback(self.scene) for name, mesh in self.scene.geometry.items(): - if self.vertex_list_hash[name] != geometry_hash(mesh): + if self.vertex_list_hash.get(name, None) != geometry_hash(mesh): self.add_geometry(name, mesh) def add_geometry(self, name, geometry, **kwargs):
Avoid KeyError during real-time update and rendering
py
diff --git a/crasync/core.py b/crasync/core.py index <HASH>..<HASH> 100644 --- a/crasync/core.py +++ b/crasync/core.py @@ -39,7 +39,8 @@ class Client: BASE = 'http://api.cr-api.com' - def __init__(self, session=None): + def __init__(self, session=None, timeout=10): + self.timeout = timeout self.session = session or aiohttp.ClientSession() async def __aenter__(self): @@ -52,7 +53,7 @@ class Client: self.session.close() async def request(self, url): - async with self.session.get(url) as resp: + async with self.session.get(url, timeout=self.timeout) as resp: try: data = await resp.json() except (asyncio.TimeoutError, aiohttp.ClientResponseError):
Added an async timeout
py
diff --git a/warehouse/macaroons/caveats.py b/warehouse/macaroons/caveats.py index <HASH>..<HASH> 100644 --- a/warehouse/macaroons/caveats.py +++ b/warehouse/macaroons/caveats.py @@ -86,5 +86,8 @@ class Verifier: try: return self.verifier.verify(self.macaroon, key) - except pymacaroons.exceptions.MacaroonInvalidSignatureException: + except ( + pymacaroons.exceptions.MacaroonInvalidSignatureException, + Exception, # https://github.com/ecordell/pymacaroons/issues/50 + ): raise InvalidMacaroon("invalid macaroon signature")
Catch more pymacaroons exceptions (#<I>)
py
diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py index <HASH>..<HASH> 100644 --- a/salt/modules/win_file.py +++ b/salt/modules/win_file.py @@ -1172,7 +1172,14 @@ def readlink(path): if target.startswith('\\??\\'): target = target[4:] - # comes out in 8.3 form; convert it to LFN to make it look nicer - target = win32file.GetLongPathName(target) + + try: + # comes out in 8.3 form; convert it to LFN to make it look nicer + target = win32file.GetLongPathName(target) + except pywinerror as e: + # if file is not found (i.e. bad symlink), return it anyway like on *nix + if e.winerror == 2: + return target + raise return target
More compatibility changes - win_file.readlink now returns the path regardless of whether it is valid.
py
diff --git a/dtool_s3/storagebroker.py b/dtool_s3/storagebroker.py index <HASH>..<HASH> 100644 --- a/dtool_s3/storagebroker.py +++ b/dtool_s3/storagebroker.py @@ -464,6 +464,9 @@ class S3StorageBroker(object): for identifier in manifest["items"]: self.make_key_public(self.data_key_prefix + identifier) + http_manifest = self.generate_http_manifest() + self.write_http_manifest(http_manifest) + access_url = "https://{}.s3.amazonaws.com/{}".format(self.bucket, self.uuid) return access_url
Fix defect in http_enable; write out missing http_manifest and make public
py
diff --git a/zipline/assets/assets.py b/zipline/assets/assets.py index <HASH>..<HASH> 100644 --- a/zipline/assets/assets.py +++ b/zipline/assets/assets.py @@ -900,14 +900,11 @@ class AssetFinderCachedEquities(AssetFinder): fuzzy_symbol, [] ).append(asset) - def _convert_row_to_equity(self, equity): + def _convert_row_to_equity(self, row): """ Converts a SQLAlchemy equity row to an Equity object. """ - data = dict(equity.items()) - _convert_asset_timestamp_fields(data) - asset = Equity(**data) - return asset + return Equity(**_convert_asset_timestamp_fields(dict(row))) def _get_fuzzy_candidates(self, fuzzy_symbol): if fuzzy_symbol in self.fuzzy_symbol_hashed_equities:
PERF: Fewer conversions in _convert_row_to_equity.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ setup( include_package_data=True, # Package dependencies. - install_requires=['oauth2'], + install_requires=['twython', 'oauth2'], # Metadata for PyPI. author='Antonio Hinojo Montero',
added twython as package dependency
py
diff --git a/cbamf/comp/psfs.py b/cbamf/comp/psfs.py index <HASH>..<HASH> 100644 --- a/cbamf/comp/psfs.py +++ b/cbamf/comp/psfs.py @@ -603,7 +603,7 @@ class GaussianMomentExpansion(PSF4D): skew = self._poly(z, self._skew_coeffs(d)) skewval = top*(np.tanh(skew) + 1) - top - return skewval*(3 - 6*x**2 + x**4) + return skewval*(3*x - x**3) @memoize() def _kurtosis(self, x, z, d=0):
wrong skew polynomial
py
diff --git a/ibm_mq/datadog_checks/ibm_mq/collectors/stats_collector.py b/ibm_mq/datadog_checks/ibm_mq/collectors/stats_collector.py index <HASH>..<HASH> 100644 --- a/ibm_mq/datadog_checks/ibm_mq/collectors/stats_collector.py +++ b/ibm_mq/datadog_checks/ibm_mq/collectors/stats_collector.py @@ -73,7 +73,10 @@ class StatsCollector(object): else: raise finally: - queue.close() + try: + queue.close() + except pymqi.PYIFError as e: + self.log.debug("Could not close queue: %s", str(e)) def _collect_channel_stats(self, channel_stats): self.log.debug('Collect channel stats. Number of channels: %s', len(channel_stats.channels))
Add try-catch on queue closure (#<I>) * Add try-catch on queue closure
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ instructions. setup( name="django-prometheus", - version="1.0.5", + version="1.0.6", author="Uriel Corfa", author_email="[email protected]", description=( @@ -25,7 +25,6 @@ setup( long_description=LONG_DESCRIPTION, install_requires=[ "prometheus_client>=0.0.13", - "pip-prometheus>=1.0.0", ], classifiers=[ "Development Status :: 5 - Production/Stable",
Release <I>. Fixes #<I>, as use of pip is no longer required.
py
diff --git a/securitycenter/sc4.py b/securitycenter/sc4.py index <HASH>..<HASH> 100644 --- a/securitycenter/sc4.py +++ b/securitycenter/sc4.py @@ -8,6 +8,7 @@ import httplib import logging import mimetypes import os +import ssl import random from StringIO import StringIO from urllib import urlencode @@ -214,7 +215,7 @@ class SecurityCenter4(object): content_type = 'multipart/form-data; boundary=%s' % boundry return content_type, payload - def _request(self, module, action, data={}, headers={}, dejson=True, + def _request(self, module, action, data=None, headers=None, dejson=True, filename=False): ''' This is the core internal function for interacting with the API. All @@ -291,7 +292,7 @@ class SecurityCenter4(object): ])) # Now it's time to make the connection and actually talk to SC. - resp = urlopen(Request(self._url, payload, headers)) + resp = urlopen(Request(self._url, payload, headers), context=ssl.SSLContext(ssl.PROTOCOL_TLSv1)) data = resp.read() # And we need to log the response as well....
Updated SC4 to no longer check Cert validity
py
diff --git a/crackmapexec.py b/crackmapexec.py index <HASH>..<HASH> 100755 --- a/crackmapexec.py +++ b/crackmapexec.py @@ -2655,6 +2655,10 @@ if __name__ == '__main__': bgroup.add_argument("--upload", nargs=2, metavar=('SRC', 'DST'), help="Upload a file to the remote systems") bgroup.add_argument("--delete", metavar="PATH", help="Delete a remote file") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + args = parser.parse_args() if args.verbose:
usage will now display if the script is called with no arguments
py
diff --git a/raincloudy/faucet.py b/raincloudy/faucet.py index <HASH>..<HASH> 100644 --- a/raincloudy/faucet.py +++ b/raincloudy/faucet.py @@ -106,8 +106,10 @@ class RainCloudyFaucetCore(object): @property def battery(self): """Return faucet battery.""" - battery_level = self._lookup_attr('active_faucet_battery_level') - return battery_level if isinstance(battery_level, int) else None + battery = self._lookup_attr('active_faucet_battery_level') + if battery == '' or battery is None: + return None + return battery.strip('%') def update(self): """Callback self._controller.update()."""
Fix faucet battery property (#<I>)
py
diff --git a/draw_test.py b/draw_test.py index <HASH>..<HASH> 100644 --- a/draw_test.py +++ b/draw_test.py @@ -15,7 +15,7 @@ class App(pyxel.App): '550f99f999f99f05', '550f00f000f00f05', '5509009050900905', '5550550555055055' ] - self.image.set(0, 0, 16, 16, image_data) + self.image.set(0, 0, image_data) self.bank(0, self.image) self.pal_test_is_enabled = False
Removed the unnecessary arguments of the Image set method
py
diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py index <HASH>..<HASH> 100644 --- a/tests/lax_numpy_einsum_test.py +++ b/tests/lax_numpy_einsum_test.py @@ -22,7 +22,6 @@ import itertools import numpy as onp from absl.testing import absltest from absl.testing import parameterized -import six import jax.numpy as np import jax.test_util as jtu @@ -214,10 +213,6 @@ class EinsumTest(jtu.JaxTestCase): self._check(s, x, y) def test_tf_unsupported_3(self): - # TODO(mattjj): heisenbug! fails sometimes in python3. opt_einsum bug? - if six.PY3: - return absltest.unittest.skip("py3 failures") - # from https://www.tensorflow.org/api_docs/python/tf/einsum r = rng() x = r.randn(2, 3)
Enable the remaining einsum tests. Fixes #<I>.
py
diff --git a/udiskie/cli.py b/udiskie/cli.py index <HASH>..<HASH> 100644 --- a/udiskie/cli.py +++ b/udiskie/cli.py @@ -51,7 +51,6 @@ def udisks_service_object(clsname, version=None): class _EntryPoint(object): """ Base class for other entry points. - """ def __init__(self, **kwargs): self.__dict__.update(kwargs) @@ -84,7 +83,7 @@ class _EntryPoint(object): """ import udiskie.config - # parse program options: + # parse program options (retrieve log level and config file name): parser = cls.program_options_parser() options, posargs = parser.parse_args(argv) @@ -96,8 +95,10 @@ class _EntryPoint(object): fmt = '%(message)s' logging.basicConfig(level=log_level, format=fmt) - # parse config options: + # parse config options (reparse to get the real values now): config = udiskie.config.Config.from_config_file(options.config_file) + parser.set_defaults(**config.program_options) + options, posargs = parser.parse_args(argv) return cls.create(config, options, posargs).run(options, posargs)
Use the config file program options as defaults. In the first run of the optparse.OptionParser the name of the config file is determined; in the second run the parser is initialized with the default values from the config file.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -22,13 +22,13 @@ install_requires = [ setup( name='figgypy', - version='0.2.5', + version='0.3.dev', description='Simple configuration tool. Get config from yaml, json, or xml.', long_description=readme, author='Herkermer Sherwood', author_email='[email protected]', url='https://github.com/theherk/figgypy', - download_url='https://github.com/theherk/figgypy/archive/0.2.5.zip', + download_url='https://github.com/theherk/figgypy/archive/0.3.dev.zip', packages=find_packages(), platforms=['all'], license='MIT',
Update setup.py to develop version
py
diff --git a/graphcommons.py b/graphcommons.py index <HASH>..<HASH> 100644 --- a/graphcommons.py +++ b/graphcommons.py @@ -122,7 +122,7 @@ class Graph(Entity): def sync(self, graph_commons): """Synchronize local and remote representations.""" if self['id'] is None: - return {} + return remote_graph = graph_commons.graphs(self['id'])
Consistent return value from no-op path
py
diff --git a/core/eolearn/core/eodata_io.py b/core/eolearn/core/eodata_io.py index <HASH>..<HASH> 100644 --- a/core/eolearn/core/eodata_io.py +++ b/core/eolearn/core/eodata_io.py @@ -298,11 +298,17 @@ class FeatureIO: return np.save(file, data) if file_format is MimeType.GPKG: - # Temporary workaround until GeoPandas 0.11 is released layer = fs.path.basename(self.path) try: - return data.to_file(file, driver="GPKG", encoding="utf-8", layer=layer) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + message="You are attempting to write an empty DataFrame to file*", + category=UserWarning, + ) + return data.to_file(file, driver="GPKG", encoding="utf-8", layer=layer, index=False) except ValueError as err: + # This workaround is only required for geopandas<0.11.0 and will be removed in the future. if data.empty: schema = infer_schema(data) return data.to_file(file, driver="GPKG", encoding="utf-8", layer=layer, schema=schema)
Fixed an issue with the index column being saved for empty dataframes for geopandas <I>, and suppressed a warning
py
diff --git a/tests/laser/transaction/create_transaction_test.py b/tests/laser/transaction/create_transaction_test.py index <HASH>..<HASH> 100644 --- a/tests/laser/transaction/create_transaction_test.py +++ b/tests/laser/transaction/create_transaction_test.py @@ -41,7 +41,10 @@ def test_sym_exec(): ) sym = SymExecWrapper( - contract, address=(util.get_indexed_address(0)), strategy="dfs" + contract, + address=(util.get_indexed_address(0)), + strategy="dfs", + execution_timeout=10, ) issues = fire_lasers(sym)
Fix the time limit of calls.sol execution
py
diff --git a/suds/sax/element.py b/suds/sax/element.py index <HASH>..<HASH> 100644 --- a/suds/sax/element.py +++ b/suds/sax/element.py @@ -783,7 +783,7 @@ class Element(UnicodeMixin): """ if ns is None: return - if not isinstance(ns, (tuple, list)): + if not isinstance(ns, (list, tuple)): raise Exception("namespace must be a list or a tuple") if ns[0] is None: self.expns = ns[1] @@ -853,11 +853,11 @@ class Element(UnicodeMixin): """ s = [] - myns = (None, self.expns) + myns = None, self.expns if self.parent is None: pns = Namespace.default else: - pns = (None, self.parent.expns) + pns = None, self.parent.expns if myns[1] != pns[1]: if self.expns is not None: s.append(' xmlns="%s"' % (self.expns,))
make minor coding style details consistent (stylistic)
py
diff --git a/allennlp/commands/elmo.py b/allennlp/commands/elmo.py index <HASH>..<HASH> 100644 --- a/allennlp/commands/elmo.py +++ b/allennlp/commands/elmo.py @@ -97,7 +97,8 @@ class Elmo(Subcommand): subparser = parser.add_parser( name, description=description, help='Create word vectors using a pretrained ELMo model.') - subparser.add_argument('input_file', type=argparse.FileType('r'), help='The path to the input file.') + subparser.add_argument('input_file', type=argparse.FileType('r', encoding='utf-8'), + help='The path to the input file.') subparser.add_argument('output_file', type=str, help='The path to the output file.') group = subparser.add_mutually_exclusive_group(required=True)
Fixed ELMO command's lack of encoding specification when reading from… (#<I>) Fixes #<I>. Running the "allennlp elmo" command might produce sequences of a length different from the originally intended one. The issue lies in not being able to specify an encoding with which to read the input file, which makes the argparse.FileType('r') reader interpret some unicode characters as zero-length whitespace.
py
diff --git a/motor/__init__.py b/motor/__init__.py index <HASH>..<HASH> 100644 --- a/motor/__init__.py +++ b/motor/__init__.py @@ -54,7 +54,7 @@ if pymongo.version != expected_pymongo_version: "Motor %s requires PyMongo at exactly version %s. " "You have PyMongo %s. " "Do pip install " - "git+git://github.com/mongodb/mongo-python-driver.git@49ff70c3" + "git+git://github.com/mongodb/mongo-python-driver.git@f78716" ) % (version, expected_pymongo_version, pymongo.version) raise ImportError(msg)
Update PyMongo dependency to f<I>. This commit includes a fix for PYTHON-<I>, a GridOut bug.
py
diff --git a/consul/base.py b/consul/base.py index <HASH>..<HASH> 100755 --- a/consul/base.py +++ b/consul/base.py @@ -878,7 +878,7 @@ class Consul(object): if tags: payload['tags'] = tags if meta: - payload['meta'] = meta + payload['meta'] = {str(k): str(v) for (k, v) in meta.items()} if check: payload['check'] = check
Service metadata should be in form map<string|string>
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ if os.environ.get("CI_VERSION_SUFFIX"): c_sources = [] for root, dirs, files in os.walk("c"): for name in files: - if name.endswith(".c"): + if name.endswith(".c") or name.endswith(".cpp"): c_sources.append(os.path.join(root, name)) # Find python source directories @@ -100,7 +100,10 @@ os.environ["LLVM_CONFIG"] = llvm_config #------------------------------------------------------------------------------- # Settings for building the extension #------------------------------------------------------------------------------- -extra_compile_args = ["-std=gnu++11"] +extra_compile_args = ["-std=gnu++11", "-stdlib=libc++"] + +# Include path to C++ header files +extra_compile_args += ["-I" + os.environ["LLVM4"] + "/include/c++/v1"] # This flag becomes C-level macro DTPY, which indicates that we are compiling # (Py)datatable. This is used for example in fread.c to distinguish between
Tweak some compilation flags to ensure that .cpp files can be compiled properly (#<I>)
py
diff --git a/pylint/checkers/format.py b/pylint/checkers/format.py index <HASH>..<HASH> 100644 --- a/pylint/checkers/format.py +++ b/pylint/checkers/format.py @@ -81,7 +81,7 @@ MSGS = { 'Used when a line is longer than a given number of characters.'), 'C0302': ('Too many lines in module (%s/%s)', # was W0302 'too-many-lines', - 'Used when a module has too much lines, reducing its readability.' + 'Used when a module has too many lines, reducing its readability.' ), 'C0303': ('Trailing whitespace', 'trailing-whitespace',
Change 'much' to 'many' for consistency
py
diff --git a/user_messages/models.py b/user_messages/models.py index <HASH>..<HASH> 100644 --- a/user_messages/models.py +++ b/user_messages/models.py @@ -20,7 +20,7 @@ class Thread(models.Model): @property @cached_attribute def latest_message(self): - return self.messages.all()[0] + return self.messages.order_by('-sent_at')[0] class UserThread(models.Model): @@ -42,7 +42,7 @@ class Message(models.Model): objects = MessageManager() class Meta: - ordering = ('-sent_at',) + ordering = ('sent_at',) @models.permalink def get_absolute_url(self):
revert the message ordering fix and apply the ordering locally instead
py
diff --git a/hamster/db.py b/hamster/db.py index <HASH>..<HASH> 100644 --- a/hamster/db.py +++ b/hamster/db.py @@ -16,7 +16,7 @@ class Storage(hamster.storage.Storage): def __change_category(self, id, category_id): query = "SELECT max(activity_order) + 1 FROM activities WHERE category_id = ?" - max_order = self.fetchone(query, (category_id, ))[0] + max_order = self.fetchone(query, (category_id, ))[0] or 1 statement = """ UPDATE activities
found and fixed a bug when moving activities to a newly created category. svn path=/trunk/; revision=<I>
py
diff --git a/owncloud/__init__.py b/owncloud/__init__.py index <HASH>..<HASH> 100644 --- a/owncloud/__init__.py +++ b/owncloud/__init__.py @@ -467,6 +467,34 @@ class Client(): ) raise ResponseError(res) + def get_config(self): + """Returns ownCloud config information as JSON + :returns: JSON object with config information + e.g. {'website': 'ownCloud', 'ssl': 'false', 'host': 'cloud.example.com', 'version': '1.7', 'contact': ''} + :raises: ResponseError in case an HTTP error status was returned + """ + path = 'config' + res = self.__make_ocs_request( + 'GET', + '', + path + ) + if res.status_code == 200: + tree = ET.fromstring(res.text) + self.__check_ocs_status(tree) + values = [] + + element = tree.find('data') + if element != None: + keys = [ 'version', 'website', 'host', 'contact', 'ssl' ] + for key in keys: + text = element.find(key).text or '' + values.append(text) + return dict(zip(keys, values)) + else: + return None + raise ResponseError(res) + def get_attribute(self, app = None, key = None): """Returns an application attribute
Added function get_config() - the name speaks for itself
py
diff --git a/fades/main.py b/fades/main.py index <HASH>..<HASH> 100644 --- a/fades/main.py +++ b/fades/main.py @@ -257,6 +257,8 @@ def go(argv): if create_venv: # Check if the requested packages exists in pypi. if not args.no_precheck_availability: + logger.info("Checking the availabilty of dependencies in PyPI. " + "You can use '--no_precheck_availability' to avoid it.") if not helpers.check_pypi_exists(indicated_deps): logger.error("An indicated dependency doesn't exists. Exiting") sys.exit(1)
add an INFO log informing the user that we are hitting the network
py
diff --git a/shapefile.py b/shapefile.py index <HASH>..<HASH> 100644 --- a/shapefile.py +++ b/shapefile.py @@ -214,6 +214,9 @@ class Shape(object): @property def __geo_interface__(self): + if not self.parts or not self.points: + Exception('Invalid shape, cannot create GeoJSON representation. Shape type is "%s" but does not contain any parts and/or points.' % self.shapeType) + if self.shapeType in [POINT, POINTM, POINTZ]: return { 'type': 'Point', @@ -283,6 +286,8 @@ class Shape(object): 'type': 'MultiPolygon', 'coordinates': polys } + else: + raise Exception('Shape type "%s" cannot be represented as GeoJSON.' % self.shapeType) class ShapeRecord(object): """A ShapeRecord object containing a shape along with its attributes."""
Better handling of shape __geo_interface__ exceptions. Based on and fixes #<I>.
py
diff --git a/groupy/api/groups.py b/groupy/api/groups.py index <HASH>..<HASH> 100644 --- a/groupy/api/groups.py +++ b/groupy/api/groups.py @@ -39,6 +39,22 @@ class Groups(base.Manager): return pagers.GroupList(self, self._raw_list, page=page, per_page=per_page, omit=omit) + def list_all(self, per_page=10, omit=None): + """List all groups. + + Since the order of groups is determined by recent activity, this is the + recommended way to obtain a list of all groups. See + :func:`~groupy.api.groups.Groups.list` for details about ``omit``. + + :param int per_page: number of groups per page + :param int omit: a comma-separated list of fields to exclude + :return: a list of groups + :rtype: :class:`~groupy.pagers.GroupList` + """ + pager = pagers.GroupList(self, self._raw_list, page=1, + per_page=per_page, omit=omit) + return pager.autopage() + def list_former(self): """List all former groups.
Duh. Provide the easy way to list all groups
py
diff --git a/usb/backend/libusb1.py b/usb/backend/libusb1.py index <HASH>..<HASH> 100644 --- a/usb/backend/libusb1.py +++ b/usb/backend/libusb1.py @@ -42,14 +42,14 @@ __all__ = [ 'LIBUSB_ERROR_INTERRUPTED', 'LIBUSB_ERROR_NO_MEM', 'LIBUSB_ERROR_NOT_SUPPORTED', - 'LIBUSB_ERROR_OTHER' + 'LIBUSB_ERROR_OTHER', 'LIBUSB_TRANSFER_COMPLETED', 'LIBUSB_TRANSFER_ERROR', 'LIBUSB_TRANSFER_TIMED_OUT', 'LIBUSB_TRANSFER_CANCELLED', 'LIBUSB_TRANSFER_STALL', 'LIBUSB_TRANSFER_NO_DEVICE', - 'LIBUSB_TRANSFER_OVERFLOW' + 'LIBUSB_TRANSFER_OVERFLOW', ] _logger = logging.getLogger('usb.backend.libusb1')
Add missing comma in string list (#<I>). In Python, two adjacent strings get concatenated implicitly. Missing commas in multi-line string lists are a common source of bugs causing unwanted string concatenation. In this case, it is clear that this comma is missing by mistake and there should not be a concatenation.
py
diff --git a/mambustruct.py b/mambustruct.py index <HASH>..<HASH> 100644 --- a/mambustruct.py +++ b/mambustruct.py @@ -38,8 +38,11 @@ class RequestsCounter(object): cls.requests.append(temp) cls.cnt += 1 def reset(cls): - cls.requests = [cls.requests.pop()] - cls.cnt = 1 + cls.cnt = 0 + try: + cls.requests = [cls.requests.pop()] + except IndexError: + pass # Habilita iteracion sobre estructuras Mambu class MambuStructIterator:
Fix mambustruct.RequestCounter.reset method. It should reset to 0. It should not fail when there's no previous count made.
py
diff --git a/fedmsg/consumers/__init__.py b/fedmsg/consumers/__init__.py index <HASH>..<HASH> 100644 --- a/fedmsg/consumers/__init__.py +++ b/fedmsg/consumers/__init__.py @@ -81,7 +81,8 @@ class FedmsgConsumer(moksha.hub.api.consumer.Consumer): # This call "completes" registration of this consumer with the hub. super(FedmsgConsumer, self).__init__(hub) - self.validate_signatures = self.hub.config['validate_signatures'] + if self.validate_signatures: + self.validate_signatures = self.hub.config['validate_signatures'] def validate(self, message): """ This needs to raise an exception, caught by moksha. """
If validate signatures is turned off, we shouldn't try to pull them from the config
py
diff --git a/openquake/baselib/runtests.py b/openquake/baselib/runtests.py index <HASH>..<HASH> 100644 --- a/openquake/baselib/runtests.py +++ b/openquake/baselib/runtests.py @@ -50,6 +50,8 @@ class TestResult(unittest.TextTestResult): f.write('%s %s\n' % (name, value)) print(''.join(open(fname).readlines()[:20])) print('Saved times in ' + fname) + if self.errors or self.failures: + raise SystemExit(len(self.errors) + len(self.failures)) unittest.TextTestRunner.resultclass = TestResult
runtests must exit with an error code > 0 if there were errors/failures
py
diff --git a/mock.py b/mock.py index <HASH>..<HASH> 100644 --- a/mock.py +++ b/mock.py @@ -499,7 +499,7 @@ magic_methods = ( "complex int float index " ) -numerics = "add sub mul div truediv floordiv mod lshift rshift and xor or " +numerics = "add sub mul div truediv floordiv mod lshift rshift and xor or pow " inplace = ' '.join('i%s' % n for n in numerics.split()) right = ' '.join('r%s' % n for n in numerics.split()) extra = '' @@ -511,10 +511,12 @@ else: # not including __prepare__, __instancecheck__, __subclasscheck__ # (as they are metaclass methods) +# __del__ is not supported at all as it causes problems if it exists _non_defaults = set('__%s__' % method for method in [ - 'cmp', 'getslice', 'setslice', 'coerce', - 'dir', 'format', 'get', 'set', 'delete' + 'cmp', 'getslice', 'setslice', 'coerce', 'subclasses', + 'dir', 'format', 'get', 'set', 'delete', 'reversed', + 'missing', ]) def get_method(name, func):
Reorganise magic methods slightly.
py
diff --git a/fedmsg/commands/tail.py b/fedmsg/commands/tail.py index <HASH>..<HASH> 100644 --- a/fedmsg/commands/tail.py +++ b/fedmsg/commands/tail.py @@ -237,7 +237,9 @@ class TailCommand(BaseCommand): if not packages.intersection(actual): continue - self.log.info(formatter(message)) + output = formatter(message) + if output: + self.log.info(output) def tail():
Only output if there is something to output.
py
diff --git a/alot/db/manager.py b/alot/db/manager.py index <HASH>..<HASH> 100644 --- a/alot/db/manager.py +++ b/alot/db/manager.py @@ -242,6 +242,16 @@ class DBManager: return db.count_messages(querystring, exclude_tags=settings.get('exclude_tags')) + def collect_tags(self, querystring): + """returns tags of messages that match `querystring`""" + db = Database(path=self.path, mode=Database.MODE.READ_ONLY) + tagset = notmuch2._tags.ImmutableTagSet( + db.messages(querystring, + exclude_tags=settings.get('exclude_tags')), + '_iter_p', + notmuch2.capi.lib.notmuch_messages_collect_tags) + return [t for t in tagset] + def count_threads(self, querystring): """returns number of threads that match `querystring`""" db = Database(path=self.path, mode=Database.MODE.READ_ONLY)
db/manager: provide collect_tags(). This used to be available in the old notmuch bindings. Use direct access with the new cffi bindings. The request to upstream for exposing this library function is pending.
py
diff --git a/bulbs/indexable/management/commands/synces.py b/bulbs/indexable/management/commands/synces.py index <HASH>..<HASH> 100644 --- a/bulbs/indexable/management/commands/synces.py +++ b/bulbs/indexable/management/commands/synces.py @@ -24,11 +24,10 @@ class Command(NoArgsCommand): for index, mappings in indexes.items(): try: es.create_index(index, settings={ - "mappings": mappings, "settings": settings.ES_SETTINGS }) except IndexAlreadyExistsError: - pass + es.update_settings(index, settings.ES_SETTINGS) except ElasticHttpError as e: self.stderr.write("ES Error: %s" % e.error)
Create index mappings independently of index, just update settings?
py
diff --git a/seriously/SeriouslyCommands.py b/seriously/SeriouslyCommands.py index <HASH>..<HASH> 100755 --- a/seriously/SeriouslyCommands.py +++ b/seriously/SeriouslyCommands.py @@ -1030,7 +1030,7 @@ def caret_fn(srs): def divisors_fn(srs): a = srs.pop() - srs.push((x for x in range(1, a) if a%x==0)) + srs.push([x for x in range(1, a+1) if a%x==0]) def chunk_len_fn(srs): a = srs.pop() @@ -1218,7 +1218,7 @@ fn_table={ 0xBE:lambda x:x.push(get_reg(1)), 0xBF:lambda x:set_reg(x.pop(),x.pop()), 0xC0:lambda x:x.push(get_reg(x.pop())), - 0xC2:lambda x:x.push(zip(*x.pop())), + 0xC2:lambda x:x.push(list(zip(*x.pop()))), 0xC3:lambda x:x.push(binrep(x.pop())), 0xC4:lambda x:x.push(hexrep(x.pop())), 0xC5:dupe_each_fn,
fix divisors and transpose so that they push lists
py
diff --git a/elabapy/Manager.py b/elabapy/Manager.py index <HASH>..<HASH> 100644 --- a/elabapy/Manager.py +++ b/elabapy/Manager.py @@ -57,7 +57,7 @@ class Manager(BaseAPI): """ Get an uploaded file from ID """ - retrn self.get_data("uploads/" + str(id)) + return self.get_data("uploads/" + str(id)) def get_status(self): """
Fixed typo in get_upload function.
py
diff --git a/osfclient/models/storage.py b/osfclient/models/storage.py index <HASH>..<HASH> 100644 --- a/osfclient/models/storage.py +++ b/osfclient/models/storage.py @@ -18,8 +18,9 @@ class Storage(OSFCore): self.node = self._get_attribute(storage, 'attributes', 'node') self.provider = self._get_attribute(storage, 'attributes', 'provider') - files = ['relationships', 'files', 'links', 'related', 'href'] - self._files_url = self._get_attribute(storage, *files) + self._files_key = ('relationships', 'files', 'links', 'related', + 'href') + self._files_url = self._get_attribute(storage, *self._files_key) def __str__(self): return '<Storage [{0}]>'.format(self.id) @@ -35,7 +36,6 @@ class Storage(OSFCore): if kind == 'file': yield File(file) else: - sub_dir_url = ('relationships', 'files', 'links', - 'related', 'href') - url = self._get_attribute(file, *sub_dir_url) + # recurse into a folder and add entries to `files` + url = self._get_attribute(file, *self._files_key) files.extend(self._follow_next(url))
Refactor key to access files from a folder's JSON
py
diff --git a/pylint/message/message_handler_mix_in.py b/pylint/message/message_handler_mix_in.py index <HASH>..<HASH> 100644 --- a/pylint/message/message_handler_mix_in.py +++ b/pylint/message/message_handler_mix_in.py @@ -95,15 +95,11 @@ class MessagesHandlerMixIn: return # msgid is a category? - def category_id(cid): - cid = cid.upper() - if cid in MSG_TYPES: - return cid - return MSG_TYPES_LONG.get(cid) - - catid = category_id(msgid) - if catid is not None: - for _msgid in self.msgs_store._msgs_by_category.get(catid): + category_id = msgid.upper() + if category_id not in MSG_TYPES: + category_id = MSG_TYPES_LONG.get(category_id) + if category_id is not None: + for _msgid in self.msgs_store._msgs_by_category.get(category_id): self._set_msg_status(_msgid, enable, scope, line) return
[pylint.message] category_id does not need to be a function
py
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index <HASH>..<HASH> 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -446,7 +446,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): stacklevel=3, ) - f = lambda x: bool(regex.search(x)) + f = lambda x: regex.search(x) is not None else: if case: f = lambda x: pat in x @@ -818,7 +818,7 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan): regex = re.compile(pat, flags=flags) dtype = bool - f = lambda x: bool(regex.match(x)) + f = lambda x: regex.match(x) is not None return _na_map(f, arr, na, dtype=dtype)
Switch to using is not None (#<I>)
py
diff --git a/can/interfaces/socketcan_ctypes.py b/can/interfaces/socketcan_ctypes.py index <HASH>..<HASH> 100644 --- a/can/interfaces/socketcan_ctypes.py +++ b/can/interfaces/socketcan_ctypes.py @@ -75,7 +75,7 @@ class Bus(BusABC): def send(self, msg): - sendPacket(self.socket, message) + sendPacket(self.socket, msg) log.debug("Loading libc with ctypes...")
#4 Typo in python 2 ctype interface
py
diff --git a/lib/topology_lib_ip/library.py b/lib/topology_lib_ip/library.py index <HASH>..<HASH> 100644 --- a/lib/topology_lib_ip/library.py +++ b/lib/topology_lib_ip/library.py @@ -63,7 +63,7 @@ def _parse_ip_addr_show(raw_result): if not (re_result): # match top two lines for serveral 'always there' variables show_re = ( - r'\s*(?P<os_index>\d+):\s+(?P<dev>\w+):\s+<(?P<falgs_str>.*)?>.*?' + r'\s*(?P<os_index>\d+):\s+(?P<dev>\S+):\s+<(?P<falgs_str>.*)?>.*?' r'mtu\s+(?P<mtu>\d+).+?state\s+(?P<state>\w+).*' r'\s*link/(?P<link_type>\w+)\s+(?P<mac_address>\S+)' )
fixes re: regular expression for ip addr doesn't work with cases where the interface has a name like `1@if<I>`
py
diff --git a/neural/utils.py b/neural/utils.py index <HASH>..<HASH> 100644 --- a/neural/utils.py +++ b/neural/utils.py @@ -135,7 +135,7 @@ def run(command,products=None,working_directory='.',force_local=False): %s ----------------------- Return code: %d -''' % (command[0],' '.join(command),e.output,e.returncode),level=nl.level.error) +''' % (command[0],' '.join(command),e.output,e.returncode),level=neural.level.error) returncode = e.returncode result = RunResult(out,returncode) if products and returncode==0:
fixed a typo in nl.run
py
diff --git a/pyang/plugin.py b/pyang/plugin.py index <HASH>..<HASH> 100644 --- a/pyang/plugin.py +++ b/pyang/plugin.py @@ -122,15 +122,22 @@ class PyangPlugin(object): return def setup_fmt(self, ctx): - """Modify the Context at setup time. Called for the selected plugin. + """Modify the Context at setup time. Called for the selected + output format plugin. Override this method to modify the Context before the module repository is accessed. """ return - # setup_xform is preferred to setup_fmt for transform plugins - setup_xform = setup_fmt + def setup_xform(self, ctx): + """Modify the Context at setup time. Called for the selected + transform plugin. + + Override this method to modify the Context before the module + repository is accessed. + """ + return def pre_load_modules(self, ctx): """Called for the selected plugin, before any modules are loaded"""
Define separate plugin.setup_xform() method. Previously setup_xform was an alias for setup_fmt, which would have allowed transform plugins (erroneously) to implement setup_fmt() rather than setup_xform().
py
diff --git a/install.py b/install.py index <HASH>..<HASH> 100755 --- a/install.py +++ b/install.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python from __future__ import print_function import os import stat
change the install command's hashbang to just python
py
diff --git a/jsonrpc_http/MiddlewareEngine.py b/jsonrpc_http/MiddlewareEngine.py index <HASH>..<HASH> 100644 --- a/jsonrpc_http/MiddlewareEngine.py +++ b/jsonrpc_http/MiddlewareEngine.py @@ -507,7 +507,7 @@ class MiddlewareEngine(object): pylab.gca().set_xticks(range(num_cols)[1::2]) pylab.gca().set_xticklabels(column_names_reordered[1::2], rotation=90, size='small') - pylab.title('feature_z matrix for table: %s' % tablename) + pylab.title('column dependencies for ptable: %s' % tablename) pylab.savefig(full_filename) def dump_db(self, filename, dir=S.path.web_resources_dir):
change title of feature_z matrix plot
py
diff --git a/test/test_zippack.py b/test/test_zippack.py index <HASH>..<HASH> 100644 --- a/test/test_zippack.py +++ b/test/test_zippack.py @@ -101,7 +101,8 @@ def test_create_and_open(writable_filename): part = pack['/test/part.xml'] assert part.data == '<test>hi there</test>'.encode('ascii') rendered_children = io.StringIO() - print(pack.relationships.children, file=rendered_children) + out = six.text_type(pack.relationships.children) + print(out, file=rendered_children) relations = pack.related('http://polimetrix.com/relationships/test') assert len(relations) == 1 assert relations[0] == part
Tests pass on Python 2 again.
py
diff --git a/pip_utils/outdated.py b/pip_utils/outdated.py index <HASH>..<HASH> 100644 --- a/pip_utils/outdated.py +++ b/pip_utils/outdated.py @@ -156,7 +156,6 @@ class ListCommand(object): editables_only=options.get('editable'), ) for dist in installed_packages: - typ = 'unknown' all_candidates = finder.find_all_candidates(dist.key) if not options.get('pre'): # Remove prereleases
Remove (presumably) unnecessary declaration of typ with a default value
py
diff --git a/urbansim/models/yamlmodelrunner.py b/urbansim/models/yamlmodelrunner.py index <HASH>..<HASH> 100644 --- a/urbansim/models/yamlmodelrunner.py +++ b/urbansim/models/yamlmodelrunner.py @@ -161,7 +161,11 @@ def lcm_simulate(choosers, locations, cfgname, outdf, output_fname): """ print "Running location choice model simulation\n" cfg = misc.config(cfgname) - lcm = MNLLocationChoiceModel.from_yaml(str_or_buffer=cfg) + model_type = yaml.load(open(cfg))["model_type"] + if model_type == "locationchoice": + lcm = MNLLocationChoiceModel.from_yaml(str_or_buffer=cfg) + elif model_type == "segmented_locationchoice": + lcm = SegmentedMNLLocationChoiceModel.from_yaml(str_or_buffer=cfg) movers = choosers[choosers[output_fname].isnull()] new_units = lcm.predict(movers, locations) print "Assigned %d choosers to new units" % len(new_units.index)
update lcm_simulate in yamlmodelrunner
py
diff --git a/nosedjangotests/polls/tests/transaction_tester.py b/nosedjangotests/polls/tests/transaction_tester.py index <HASH>..<HASH> 100644 --- a/nosedjangotests/polls/tests/transaction_tester.py +++ b/nosedjangotests/polls/tests/transaction_tester.py @@ -1,4 +1,7 @@ -from django.db.transaction import atomic +try: + from django.db.transaction import atomic +except ImportError: + from django.db.transaction import commit_on_success as atomic @atomic
refs #<I> dj<I> try except for atomic
py
diff --git a/dataviews/ndmapping.py b/dataviews/ndmapping.py index <HASH>..<HASH> 100644 --- a/dataviews/ndmapping.py +++ b/dataviews/ndmapping.py @@ -372,8 +372,9 @@ class NdIndexableMapping(param.Parameterized): return repr(self) - def dim_max(self, dim): - return np.max([k[self.dim_index(dim)] for k in self.keys()]) + def dim_range(self, dim): + dim_values = [k[self.dim_index(dim)] for k in self._data.keys()] + return np.min(dim_values), np.max(dim_values) @property
Replaced dim_max with dim_range
py
diff --git a/src/flags.py b/src/flags.py index <HASH>..<HASH> 100644 --- a/src/flags.py +++ b/src/flags.py @@ -25,6 +25,8 @@ __license__ = 'MIT' def unique(flags_class): """ A decorator for flags classes to forbid flag aliases. """ + if not _is_flags_class_final(flags_class): + raise TypeError('unique check can be applied only to flags classes that have members') if not flags_class.__member_aliases__: return flags_class aliases = ', '.join('%s -> %s' % (alias, name) for alias, name in flags_class.__member_aliases__.items())
nice error message when someone applies @unique to a non-final flags class
py
diff --git a/simuvex/s_run.py b/simuvex/s_run.py index <HASH>..<HASH> 100644 --- a/simuvex/s_run.py +++ b/simuvex/s_run.py @@ -95,10 +95,10 @@ class SimRun(object): # Categorize and add a sequence of refs to this run def add_actions(self, *refs): for r in refs: - if o.SYMBOLIC not in self.initial_state.options and r.is_symbolic(): - continue - - self._actions.append(r) + self.state.log._add_event(r) + # if o.SYMBOLIC not in self.initial_state.options and r.is_symbolic(): + # continue + # self._actions.append(r) # Categorize and add a sequence of exits to this run def add_exits(self, *exits):
don't filter actions on symbolicity for now
py
diff --git a/src/deploy.py b/src/deploy.py index <HASH>..<HASH> 100644 --- a/src/deploy.py +++ b/src/deploy.py @@ -1,13 +1,14 @@ from error import FileNotFoundError, WrongFormatError, MissingKeyError, RemoveFolderError, FileNotWritableError import json import os -from shutil import copytree, rmtree +from shutil import rmtree +import distutils.core class Deploy: def __init__(self, with_dizmo): self._with_dizmo = with_dizmo - self._cwd = os.getCwd() + self._cwd = os.getcwd() try: config_file = open(self._cwd + '/config.json') @@ -53,6 +54,6 @@ class Deploy: raise RemoveFolderError('Could not remove the existing deployment path.') try: - copytree(self._build_path, self._deployment_path) + distutils.dir_util.copy_tree(self._build_path, self._deployment_path) except: - raise FileNotWritableError('Could not copy all the libraries.') + raise FileNotWritableError('Could not copy the build directory to the deployment path.')
fixed deployment class - added distutils to use copy_tree for better copying of the build output - fixed os.getcwd() typo - fixed description for copy failure
py
diff --git a/{{cookiecutter.repo_name}}/config/settings/common.py b/{{cookiecutter.repo_name}}/config/settings/common.py index <HASH>..<HASH> 100644 --- a/{{cookiecutter.repo_name}}/config/settings/common.py +++ b/{{cookiecutter.repo_name}}/config/settings/common.py @@ -256,11 +256,14 @@ LOGGING = { } } {% if cookiecutter.use_celery == "y" %} -########## CELERY +# ######### CELERY INSTALLED_APPS += ('{{cookiecutter.repo_name}}.taskapp.celery.CeleryConfig',) # if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line. INSTALLED_APPS += ('kombu.transport.django',) BROKER_URL = env("CELERY_BROKER_URL", default='django://') -########## END CELERY +CELERY_ACCEPT_CONTENT = ['json'] +CELERY_TASK_SERIALIZER = 'json' +CELERY_RESULT_SERIALIZER = 'json' +# ######### END CELERY {% endif %} # Your common stuff: Below this line define 3rd party library settings
Switch celery default serialization to JSON, to avoid pickle related deprecation warnings
py
diff --git a/quilt/utils.py b/quilt/utils.py index <HASH>..<HASH> 100644 --- a/quilt/utils.py +++ b/quilt/utils.py @@ -23,6 +23,7 @@ import os import os.path +import shutil import subprocess from quilt.error import QuiltError @@ -113,6 +114,10 @@ class Directory(object): subdirectories """ return self._content(self.dirname) + def delete(self): + """ Delete the directory and its content """ + shutil.rmtree(self.dirname) + def __add__(self, other): if isinstance(other, Directory): return Directory(os.path.join(self.dirname, other.dirname))
Add Directory delete method. The Directory delete method deletes the directory and all its content (subdirs and contained files) from the filesystem.
py
diff --git a/mpds_client/retrieve_MPDS.py b/mpds_client/retrieve_MPDS.py index <HASH>..<HASH> 100755 --- a/mpds_client/retrieve_MPDS.py +++ b/mpds_client/retrieve_MPDS.py @@ -318,7 +318,12 @@ class MPDSDataRetrieval(object): Helper method for representing the MPDS crystal structures in two flavors: either as a Pymatgen Structure object, or as an ASE Atoms object. - Attention! These two flavors are not compatible, e.g. + Attention #1. Disordered structures (i.e. fractional indices in the chemical formulae) + are not supported by this method, and hence the occupancies are not retrieved. + Currently it's up to the user to take care of that (see e.g. + https://doi.org/10.1186/s13321-016-0129-3 etc.). + + Attention #2. Pymatgen and ASE flavors are generally not compatible, e.g. primitive vs. crystallographic cell is defaulted, atoms wrapped or non-wrapped into the unit cell etc. @@ -330,7 +335,6 @@ class MPDSDataRetrieval(object): - basis_noneq - els_noneq e.g. like this: {'S':['cell_abc', 'sg_n', 'setting', 'basis_noneq', 'els_noneq']} - NB. here occupancies are not retrieved. Args: datarow: (list) Required data to construct crystal structure:
A more explicit statement that this client currently DOES NOT handle disordered structures
py
diff --git a/nifstd/nifstd_tools/ontree.py b/nifstd/nifstd_tools/ontree.py index <HASH>..<HASH> 100755 --- a/nifstd/nifstd_tools/ontree.py +++ b/nifstd/nifstd_tools/ontree.py @@ -810,10 +810,8 @@ def main(): sgv.api_key = api_key sgc.api_key = api_key scs = OntTerm.query.services[0] + scs.api_key = api_key scs.setup() - scs.sgg.api_key = api_key - scs.sgv.api_key = api_key - scs.sgc.api_key = api_key app = server(verbose=verbose) app.debug = False
ontree use the 'proper' way of passing the api to ontquery services 'proper' but obviously not correct
py
diff --git a/mrivis/base.py b/mrivis/base.py index <HASH>..<HASH> 100644 --- a/mrivis/base.py +++ b/mrivis/base.py @@ -7,7 +7,7 @@ from matplotlib.image import AxesImage from matplotlib.axis import Axis from collections import Iterable -from mrivis.utils import check_num_slices, check_views, row_wise_rescale +from mrivis.utils import check_num_slices, check_views, row_wise_rescale, read_image from mrivis import config as cfg class SlicePicker(object): @@ -704,8 +704,8 @@ class Carpet(object): Parameters ---------- - image_ND : ndarray - input image from which the carpet needs to be made. + image_ND : ndarray or str + input image, or a path to an image, from which the carpet needs to be made. fixed_dim : int @@ -740,11 +740,13 @@ class Carpet(object): self.carpet = self.carpet[:, ::num_frames_to_skip] def _make_carpet(self, image_ND, fixed_dim, rescale_data): - """Contruscts the carpet from the input image. + """Constructs the carpet from the input image. Optional rescaling of the data. """ + image_ND = read_image(image_ND, bkground_thresh=None) + self.carpet = image_ND.reshape(-1, image_ND.shape[fixed_dim]) if rescale_data: self.carpet = row_wise_rescale(self.carpet)
allowing a path to be supplied
py
diff --git a/jarn/mkrelease/setuptools.py b/jarn/mkrelease/setuptools.py index <HASH>..<HASH> 100644 --- a/jarn/mkrelease/setuptools.py +++ b/jarn/mkrelease/setuptools.py @@ -203,19 +203,19 @@ import pkg_resources from os.path import basename def walk_revctrl(dirname=''): - found = False + finder = False items = [] for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): if %(scmtype)r in ep.name: - found = True + finder = True finder_items = [] - distutils.log.info('using ' + ep.name + ' file-finder') + distutils.log.info('using %%s file-finder', ep.name) for item in ep.load()(dirname): if not basename(item).startswith(('.svn', '.hg', '.git')): finder_items.append(item) distutils.log.info('%%d files found', len(finder_items)) items.extend(finder_items) - if not found: + if not finder: print >>sys.stderr, 'No %(scmtype)s file-finder ' \ '(setuptools-%(scmtype)s extension missing?)' sys.exit(1)
Rename bool found -> finder in walk_revctrl.
py
diff --git a/tests/conftest.py b/tests/conftest.py index <HASH>..<HASH> 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,6 @@ """Define global fixtures.""" -from os.path import abspath, join +from os.path import join from pathlib import Path from pickle import load as _load @@ -43,38 +43,6 @@ def create_test_model(model_name="salmonella") -> Model: return _load(infile) -def test_all(args=None): - """Alias for running all unit-tests on installed cobra.""" - if pytest: - args = args if args else [] - - return pytest.main( - [ - str(abspath(join(cobra_directory, "tests"))), - "--benchmark-skip", - "-v", - "-rs", - ] - + args - ) - else: - raise ImportError( - "missing package pytest and pytest_benchmark required for testing" - ) - - -def pytest_addoption(parser): - try: - parser.addoption("--run-slow", action="store_true", help="run slow tests") - parser.addoption( - "--run-non-deterministic", - action="store_true", - help="run tests that sometimes (rarely) fail", - ) - except ValueError: - pass - - @pytest.fixture(scope="session") def data_directory(): return data_dir
removed test_all and pytest_addoption which seem irrelevant
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,6 @@ install_requires = [ 'pyparsing>=1.5.7', 'pytz', 'six', - 'structlog', 'tzlocal', ] @@ -18,6 +17,9 @@ if sys.version_info < (2, 7): install_requires.append('importlib') install_requires.append('logutils') install_requires.append('ordereddict') + install_requires.append('structlog<=16.0.0') +else: + install_requires.append('structlog') with open('README.rst') as f: long_description = f.read()
Don't use structlog versions newer than <I> for python <I>
py
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py index <HASH>..<HASH> 100644 --- a/pandas/stats/moments.py +++ b/pandas/stats/moments.py @@ -97,7 +97,7 @@ ignore_na : boolean, default False _ewm_notes = r""" Notes ----- -Either center of mass or span must be specified +Either center of mass, span or halflife must be specified EWMA is sometimes specified using a "span" parameter `s`, we have that the decay parameter :math:`\alpha` is related to the span as
DOC: Included halflife as one of 3 optional params that must be specified
py
diff --git a/cellpy/utils/ica.py b/cellpy/utils/ica.py index <HASH>..<HASH> 100644 --- a/cellpy/utils/ica.py +++ b/cellpy/utils/ica.py @@ -248,7 +248,7 @@ class Converter(object): """perform the dq-dv transform""" # NOTE TO ASBJOERN: Probably insert method for "binning" instead of - # TODO: Asbjørn will inster "binning" here + # TODO: Asbjørn will insert "binning" here # differentiating here # (use self.increment_method as the variable for selecting method for)
fix a spelling mistake in one comment...
py
diff --git a/bitmerchant/bip32/wallet.py b/bitmerchant/bip32/wallet.py index <HASH>..<HASH> 100644 --- a/bitmerchant/bip32/wallet.py +++ b/bitmerchant/bip32/wallet.py @@ -147,6 +147,24 @@ class Wallet(wallet.Wallet): return eq + def __ne__(self, other): + return not self.__eq__(other) + + def _no_ordering(self): + raise TypeError("Objects of this type have no ordering") + + def __lt__(self, other): + self._no_ordering() + + def __le__(self, other): + self._no_ordering() + + def __gt__(self, other): + self._no_ordering() + + def __ge__(self, other): + self._no_ordering() + class PrivateKeyException(Exception): """Exception for problems with a private key."""
Don't allow comparisons other than == & !=
py
diff --git a/salt/modules/pip.py b/salt/modules/pip.py index <HASH>..<HASH> 100644 --- a/salt/modules/pip.py +++ b/salt/modules/pip.py @@ -1143,7 +1143,7 @@ def list_upgrades(bin_env=None, packages = {} for line in result['stdout'].splitlines(): - match = re.search(r'(\S*)\s+\(.*Latest:\s+(.*)\)', line) + match = re.search(r'(\S*)\s+.*Latest:\s+(.*)', line) if match: name, version_ = match.groups() else:
Correct pip parsing for lines like: protobuf (<I>) - Latest: <I> [wheel]
py
diff --git a/juicer/juicer/Juicer.py b/juicer/juicer/Juicer.py index <HASH>..<HASH> 100644 --- a/juicer/juicer/Juicer.py +++ b/juicer/juicer/Juicer.py @@ -191,8 +191,15 @@ class Juicer(object): return cart def show(self, cart_name): - cart = juicer.common.Cart.Cart(cart_name) - cart.load(cart_name) + # use local cart if present + # otherwise use mongo version + cart_file = os.path.join(Constants.CART_LOCATION, '%s.json' % cart_name) + if os.path.exists(cart_file): + cart = juicer.common.Cart.Cart(cart_name) + cart.load(cart_name) + else: + cln = juicer.utils.get_login_info()[1]['start_in'] + cart = juicer.utils.cart_db()[cln].find_one({'_id': cart_name}) return str(cart) def list(self, cart_glob=['*.json']):
show remote carts for #<I> next up, globbing!
py
diff --git a/mythril/analysis/symbolic.py b/mythril/analysis/symbolic.py index <HASH>..<HASH> 100644 --- a/mythril/analysis/symbolic.py +++ b/mythril/analysis/symbolic.py @@ -96,10 +96,10 @@ class SymExecWrapper: raise ValueError("Invalid strategy argument supplied") creator_account = Account( - hex(ACTORS.creator.value), "", dynamic_loader=dynloader, contract_name=None + hex(ACTORS.creator.value), "", dynamic_loader=None, contract_name=None ) attacker_account = Account( - hex(ACTORS.attacker.value), "", dynamic_loader=dynloader, contract_name=None + hex(ACTORS.attacker.value), "", dynamic_loader=None, contract_name=None ) requires_statespace = (
Virtual accounts don't need dynloader
py
diff --git a/indra/tools/live_curation.py b/indra/tools/live_curation.py index <HASH>..<HASH> 100644 --- a/indra/tools/live_curation.py +++ b/indra/tools/live_curation.py @@ -155,6 +155,15 @@ class Corpus(object): logger.exception('Failed to put on s3: %s' % e) return None + @staticmethod + def _s3_put_file(s3, key, json_obj, bucket=default_bucket): + """Does the json.dumps operation for the the upload, i.e. json_obj + must be an object that can be turned into a bytestring using + json.dumps""" + logger.info('Uploading %s to S3' % key) + s3.put_object(Body=json.dumps(json_obj), + Bucket=bucket, Key=key) + def _save_to_cache(self, raw=None, sts=None, cur=None): # Assuming file keys are full s3 keys: # <base_name>/<dirname>/<file>.json
Add helper method for uploading a single file to s3
py
diff --git a/yoti_python_sdk/activity_details.py b/yoti_python_sdk/activity_details.py index <HASH>..<HASH> 100644 --- a/yoti_python_sdk/activity_details.py +++ b/yoti_python_sdk/activity_details.py @@ -123,10 +123,11 @@ class ActivityDetails: ][config.KEY_FORMATTED_ADDRESS] def __iter__(self): - yield "user_id", self.user_id + yield "user_id", self.__remember_me_id # Using the private member directly to avoid a deprecation warning yield "parent_remember_me_id", self.parent_remember_me_id yield "outcome", self.outcome yield "receipt_id", self.receipt_id yield "user_profile", self.user_profile yield "profile", self.profile yield "base64_selfie_uri", self.base64_selfie_uri + yield "remember_me_id", self.remember_me_id
SDK-<I>: Adding remember_me_id to iterator in ActivityDetails. Preserving the existing user_id key/value pair to avoid a breaking change, but ensuring it bypasses the deprecated getter method, so that using the iterator doesn't generate a spurious deprecation warning
py
diff --git a/doppel/version.py b/doppel/version.py index <HASH>..<HASH> 100644 --- a/doppel/version.py +++ b/doppel/version.py @@ -1 +1 @@ -version = '0.3.0' +version = '0.4.0.dev0'
Update version to <I>.dev0
py
diff --git a/mbuild/tests/base_test.py b/mbuild/tests/base_test.py index <HASH>..<HASH> 100644 --- a/mbuild/tests/base_test.py +++ b/mbuild/tests/base_test.py @@ -162,3 +162,8 @@ class BaseTest: mb.translate(ch['b'], [0, 0.07, 0]) mb.rotate_around_z(ch['b'], -120.0 * (np.pi/180.0)) return ch + + @pytest.fixture + def silane(self): + from mbuild.lib.moieties import Silane + return Silane()
Add pytest fixture for silane
py
diff --git a/pyvex/lift/util/syntax_wrapper.py b/pyvex/lift/util/syntax_wrapper.py index <HASH>..<HASH> 100644 --- a/pyvex/lift/util/syntax_wrapper.py +++ b/pyvex/lift/util/syntax_wrapper.py @@ -211,6 +211,7 @@ class VexValue(object): return self.irsb_c.op_cmp_ult(self.rdt, right.rdt) @checkparams() + @vvifyresults def __mod__(self, right): # Note: nonprimitive return self.irsb_c.op_mod(self.rdt, right.rdt)
Fix VexValue returning RdTmp on __mod__
py
diff --git a/angr/analyses/cfg/cfg_accurate.py b/angr/analyses/cfg/cfg_accurate.py index <HASH>..<HASH> 100644 --- a/angr/analyses/cfg/cfg_accurate.py +++ b/angr/analyses/cfg/cfg_accurate.py @@ -2518,15 +2518,12 @@ class CFGAccurate(ForwardAnalysis, CFGBase): # pylint: disable=abstract-metho symbolic_initial_state = self.project.factory.entry_state(mode='symbolic') if fastpath_state is not None: symbolic_initial_state = self.project._simos.prepare_call_state(fastpath_state, - initial_state=symbolic_initial_state) + initial_state=symbolic_initial_state) - # Create a temporary block - try: - tmp_block = self.project.factory.block(function_addr) - except (simuvex.SimError, AngrError): - return None - - num_instr = tmp_block.instructions - 1 + # Find number of instructions of start block + func = self.project.kb.functions.get(function_addr) + start_block = func._get_block(function_addr) + num_instr = start_block.instructions - 1 symbolic_initial_state.ip = function_addr path = self.project.factory.path(symbolic_initial_state)
Replace use of Factory#block to obtain start block of function.
py
diff --git a/hazelcast/cluster.py b/hazelcast/cluster.py index <HASH>..<HASH> 100644 --- a/hazelcast/cluster.py +++ b/hazelcast/cluster.py @@ -220,7 +220,7 @@ class _InternalClusterService(object): ) current = self._member_list_snapshot - if version >= current.version: + if version > current.version: self._apply_new_state_and_fire_events(current, snapshot) if current is _EMPTY_SNAPSHOT:
Apply membership events only if the memberlist version is greater than the current version (#<I>). We couldn't find a reason to apply membership events with the same memberlist version while evaluating this part of the code base in the Java client. Here, we are doing the same and making the check more strict.
py
diff --git a/librosa/filters.py b/librosa/filters.py index <HASH>..<HASH> 100644 --- a/librosa/filters.py +++ b/librosa/filters.py @@ -364,8 +364,6 @@ def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0, window : function or `None` Windowing function to apply to filters. - If `None`, no window is applied. - Default: `scipy.signal.hann` resolution : float > 0 [scalar] @@ -431,16 +429,15 @@ def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0, lengths.append(ilen) # Build the filter - win = np.exp(Q * 1j * np.linspace(0, 2 * np.pi, ilen, endpoint=False)) + sig = np.exp(1j*2*np.pi*freq*np.arange(ilen, dtype=float)/sr) # Apply the windowing function - if window is not None: - win = win * window(ilen) + sig = sig * window(ilen) # Normalize - win = util.normalize(win, norm=norm) + sig = util.normalize(sig, norm=norm) - filters.append(win) + filters.append(sig) max_len = max(lengths) if pad_fft:
Fix constant-Q frequency drift when rounding window size
py
diff --git a/simuvex/storage/paged_memory.py b/simuvex/storage/paged_memory.py index <HASH>..<HASH> 100644 --- a/simuvex/storage/paged_memory.py +++ b/simuvex/storage/paged_memory.py @@ -263,9 +263,9 @@ class SimPagedMemory(object): start_backer = new_page_addr - addr if isinstance(start_backer, BV): continue - if start_backer < 0 and abs(start_backer) > self._page_size: + if start_backer < 0 and abs(start_backer) >= self._page_size: continue - if start_backer > len(backer): + if start_backer >= len(backer): continue # find permission backer associated with the address, there should be a
Change some > to >=
py
diff --git a/ELiDE/ELiDE/funcsed.py b/ELiDE/ELiDE/funcsed.py index <HASH>..<HASH> 100644 --- a/ELiDE/ELiDE/funcsed.py +++ b/ELiDE/ELiDE/funcsed.py @@ -142,17 +142,19 @@ class FuncsEditor(BoxLayout): def on_storelist(self, *args): self.storelist.bind(selection=self._pull_func) - def _save(self, *args): - if self._text != self.store.plain(self.name): + def save(self, *args): + if not (self.name and self.store): + return + if self.source != self.store.plain(self.name): Logger.debug('saving function {}'.format(self.name)) self.store.set_source(self.name, self.source) - _trigger_save = trigger(_save) + _trigger_save = trigger(save) @trigger def _pull_func(self, *args): - self._save() + self.save() self.ids.funname.text = self.name = self.storelist.selection.name - self.source = self.storelist.selection.source + self.source = self.store.plain(self.name) class FuncsEdBox(BoxLayout):
Get the code editor to save stuff again
py
diff --git a/yadi/dataStructures/element.py b/yadi/dataStructures/element.py index <HASH>..<HASH> 100644 --- a/yadi/dataStructures/element.py +++ b/yadi/dataStructures/element.py @@ -19,11 +19,19 @@ class Variable(Element): class Constant(Element): def __init__(self,value = ''): + + # Check if it is quoted. + + if isinstance(value,str): + if len(value) > 0: + if value[0] != '\'': + value = '\'' + value + '\'' + self.value = value def __hash__(self): return hash(self.value) def __repr__(self): - return self.value + return str(self.value) class Wildcard(Element): def __hash__(self):
Fixed quotation marks at the beginning of constants.
py
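Note: the quoting rule added above, shown in isolation as a standalone sketch (not the project's class) to make the intended behavior for string vs. non-string values explicit:

def quote_constant(value):
    # Wrap unquoted non-empty strings in single quotes; leave other values alone.
    if isinstance(value, str) and len(value) > 0 and value[0] != "'":
        value = "'" + value + "'"
    return value

print(quote_constant('abc'))    # prints 'abc' (quotes added)
print(quote_constant("'abc'"))  # prints 'abc' (already quoted, unchanged)
print(quote_constant(42))       # prints 42 (non-strings untouched)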
diff --git a/angr/vexer.py b/angr/vexer.py index <HASH>..<HASH> 100644 --- a/angr/vexer.py +++ b/angr/vexer.py @@ -142,7 +142,7 @@ class VEXer: l.debug("Creating pyvex.IRSB of arch %s at 0x%x", self.arch.name, addr) if self.use_cache: - cache_key = (buff, addr, num_inst, self.arch.vex_arch, byte_offset, thumb, opt_level) + cache_key = (buff, addr, max_size, num_inst, self.arch.vex_arch, byte_offset, thumb, opt_level) if cache_key in self.irsb_cache: return self.irsb_cache[cache_key]
Take max_size into consideration for the IRSB cache key in vexer.
py
diff --git a/pyicloud/base.py b/pyicloud/base.py index <HASH>..<HASH> 100644 --- a/pyicloud/base.py +++ b/pyicloud/base.py @@ -60,9 +60,9 @@ class PyiCloudService(object): self.session = requests.Session() self.session.verify = verify self.session.headers.update({ - 'host': 'setup.icloud.com', - 'origin': self._home_endpoint, - 'referer': '%s/' % self._home_endpoint, + 'Host': 'setup.icloud.com', + 'Origin': self._home_endpoint, + 'Referer': '%s/' % self._home_endpoint, 'User-Agent': 'Opera/9.52 (X11; Linux i686; U; en)' })
Normalize HTTP header names to title case. The spec says they are case insensitive, so this is just for consistency (with e.g. Chrome).
py
diff --git a/code/png.py b/code/png.py index <HASH>..<HASH> 100755 --- a/code/png.py +++ b/code/png.py @@ -1333,7 +1333,7 @@ class Reader: """ if ((_guess is not None and len(kw) != 0) or - (_guess is None and len(kw) != 1)): + (_guess is None and len(kw) != 1)): raise TypeError("Reader() takes exactly 1 argument") # Will be the first 8 bytes, later on. See validate_signature. @@ -1847,7 +1847,7 @@ class Reader: def _process_sBIT(self, data): self.sbit = data if (self.colormap and len(data) != 3 or - not self.colormap and len(data) != self.planes): + not self.colormap and len(data) != self.planes): raise FormatError("sBIT chunk has incorrect length.") def _process_pHYs(self, data): @@ -2383,9 +2383,9 @@ def read_pam_header(infile): depth = int(header[DEPTH]) maxval = int(header[MAXVAL]) if (width <= 0 or - height <= 0 or - depth <= 0 or - maxval <= 0): + height <= 0 or + depth <= 0 or + maxval <= 0): raise Error( 'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers') return 'P7', width, height, depth, maxval
Fix flake8 indent problems
py
diff --git a/backoff.py b/backoff.py index <HASH>..<HASH> 100644 --- a/backoff.py +++ b/backoff.py @@ -1,4 +1,5 @@ # coding:utf-8 +from __future__ import unicode_literals """ Function decoration for pluggable backoff and retry @@ -154,7 +155,7 @@ def constant(interval): # Formats a function invocation as a unicode string for logging. def _invoc_repr(f, args, kwargs): - args_out = ", ".join(unicode(a) for a in args) + args_out = ", ".join("%s" % a for a in args) if args and kwargs: args_out += ", " if kwargs:
Add support for Python 3. The unicode function does not exist in Python 3, as string literals are by default unicode strings. See: <URL>
py
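Note: a quick illustration of why the change works on both interpreters -- unicode() only exists under Python 2, while the %-formatting form behaves the same on both (the example values are illustrative):

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

args = ('héllo', 42, [1, 2])

# Python 2 only -- raises NameError on Python 3, where unicode() was removed:
# args_out = ', '.join(unicode(a) for a in args)

# Works on both Python 2 (with unicode_literals) and Python 3:
args_out = ', '.join('%s' % a for a in args)
print(args_out)  # héllo, 42, [1, 2]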
diff --git a/warehouse/legacy/api/xmlrpc.py b/warehouse/legacy/api/xmlrpc.py index <HASH>..<HASH> 100644 --- a/warehouse/legacy/api/xmlrpc.py +++ b/warehouse/legacy/api/xmlrpc.py @@ -27,7 +27,12 @@ from warehouse.packaging.models import ( ) -pypi_xmlrpc = functools.partial(xmlrpc_method, endpoint="pypi") +pypi_xmlrpc = functools.partial( + xmlrpc_method, + endpoint="pypi", + require_csrf=False, + require_methods=["POST"], +) @view_config(route_name="pypi", context=Exception, renderer="xmlrpc")
Exempt XMLRPC from CSRF and set required methods to POST
py
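Note: functools.partial pre-binds keyword arguments, so every view registered through the pypi_xmlrpc alias now carries require_csrf=False and require_methods=["POST"] without repeating them at each call site. A generic sketch of that pattern (the register function below is a stand-in, not pyramid's actual xmlrpc_method):

import functools

def register(view, endpoint, require_csrf=True, require_methods=('GET', 'POST')):
    # Stand-in for a view-registration helper such as xmlrpc_method.
    print('registering view on', endpoint, require_csrf, require_methods)
    return view

pypi_xmlrpc = functools.partial(register,
                                endpoint='pypi',
                                require_csrf=False,
                                require_methods=['POST'])

pypi_xmlrpc(lambda request: 'ok')
# -> registering view on pypi False ['POST']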
diff --git a/allauth/account/migrations/0002_email_max_length.py b/allauth/account/migrations/0002_email_max_length.py index <HASH>..<HASH> 100644 --- a/allauth/account/migrations/0002_email_max_length.py +++ b/allauth/account/migrations/0002_email_max_length.py @@ -2,6 +2,9 @@ from __future__ import unicode_literals from django.db import models, migrations +from django.conf import settings + +UNIQUE_EMAIL = getattr(settings, 'ACCOUNT_UNIQUE_EMAIL', True) class Migration(migrations.Migration): @@ -14,6 +17,14 @@ class Migration(migrations.Migration): migrations.AlterField( model_name='emailaddress', name='email', - field=models.EmailField(unique=True, max_length=254, verbose_name='e-mail address'), + field=models.EmailField(unique=UNIQUE_EMAIL, max_length=254, verbose_name='e-mail address'), ), ] + + if not UNIQUE_EMAIL: + operations += [ + migrations.AlterUniqueTogether( + name='emailaddress', + unique_together=set([('user', 'email')]), + ), + ]
New migration does not check ACCOUNT_UNIQUE_EMAIL setting
py
diff --git a/c7n/resources/asg.py b/c7n/resources/asg.py index <HASH>..<HASH> 100644 --- a/c7n/resources/asg.py +++ b/c7n/resources/asg.py @@ -1579,6 +1579,8 @@ class LaunchConfig(query.QueryResourceManager): filter_type = 'list' config_type = 'AWS::AutoScaling::LaunchConfiguration' + retry = staticmethod(get_retry(('Throttling',))) + def get_source(self, source_type): if source_type == 'describe': return DescribeLaunchConfig(self)
asg launch-config - retry on throttle when fetching resources (#<I>)
py
diff --git a/fbchat/models.py b/fbchat/models.py index <HASH>..<HASH> 100644 --- a/fbchat/models.py +++ b/fbchat/models.py @@ -1,8 +1,10 @@ from __future__ import unicode_literals +import sys class Base(): def __repr__(self): - return self.__unicode__().encode('utf-8') + uni = self.__unicode__() + return uni.encode('utf-8') if sys.version_info < (3, 0) else uni def __unicode__(self): return u'<%s %s (%s)>' % (self.type.upper(), self.name, self.url) @@ -16,8 +18,7 @@ class User(Base): self.photo = data['photo'] self.url = data['path'] self.name = data['text'] - #self.score = jsoin['score'] - #self.tokens = data['tokens'] + self.score = data['score'] self.data = data
Bugfix to make Users compatible with Python 2 and 3
py
diff --git a/topiary/commandline_args.py b/topiary/commandline_args.py index <HASH>..<HASH> 100644 --- a/topiary/commandline_args.py +++ b/topiary/commandline_args.py @@ -76,20 +76,26 @@ variant_arg_group.add_argument("--json-variant-files", def variant_collection_from_args(args): variant_collections = [] + + if args.reference_name: + genome = genome_for_reference_name(args.reference_name) + else: + # no genome specified, assume it can be inferred from the file(s) + # we're loading + genome = None + for vcf_path in args.vcf: - vcf_variants = varcode.load_vcf( - vcf_path, - reference_name=args.reference_name) + vcf_variants = varcode.load_vcf(vcf_path, genome=genome) variant_collections.append(vcf_variants) for maf_path in args.maf: maf_variants = varcode.load_maf(maf_path) variant_collections.append(maf_variants) if args.variant: - if not args.reference_name: + if not genome: raise ValueError( "--reference-name must be specified when using --variant") - genome = genome_for_reference_name(args.reference_name) + variants = [ varcode.Variant( chromosome,
pass genome to load_vcf instead of reference_name
py
diff --git a/isc.py b/isc.py index <HASH>..<HASH> 100644 --- a/isc.py +++ b/isc.py @@ -59,5 +59,4 @@ def handler(return_format=None): def infocon(return_format=None): """Returns the current infocon level (green, yellow, orange, red).""" - uri = 'infocon' return _get('infocon', return_format)
Removed an unused line left over from the last commit.
py
diff --git a/lib/svtplay_dl/service/bigbrother.py b/lib/svtplay_dl/service/bigbrother.py index <HASH>..<HASH> 100644 --- a/lib/svtplay_dl/service/bigbrother.py +++ b/lib/svtplay_dl/service/bigbrother.py @@ -12,7 +12,7 @@ from svtplay_dl.log import log from svtplay_dl.fetcher.hds import hdsparse from svtplay_dl.fetcher.hls import hlsparse, HLS -class Bigbrother(Service): +class Bigbrother(Service, OpenGraphThumbMixin): supported_domains = ["bigbrother.se"] def get(self, options):
bigbrother: support for OpenGraphThumbMixin
py
diff --git a/gbdxtools/ipe/interface.py b/gbdxtools/ipe/interface.py index <HASH>..<HASH> 100644 --- a/gbdxtools/ipe/interface.py +++ b/gbdxtools/ipe/interface.py @@ -158,7 +158,7 @@ class DaskProps(object): for x in xrange(img_md['minTileX'], img_md["maxTileX"]+1)} -class Op(DaskMeta, DaskProps): +class Op(DaskProps, DaskMeta): def __init__(self, name, interface=None): self._operator = name self._edges = []
fixing the way ipe Op inherits from DaskProps and DaskMeta
py
diff --git a/thinc/api.py b/thinc/api.py index <HASH>..<HASH> 100644 --- a/thinc/api.py +++ b/thinc/api.py @@ -135,18 +135,18 @@ def with_flatten(layer): return layer.ops.unflatten(X, lengths), finish_update model = layerize(begin_update) model._layers.append(layer) + model.on_data_hooks.append(_with_flatten_on_data) + model.name = 'flatten' return model - - -def _run_child_hooks(model, X, y): +def _with_flatten_on_data(model, X, y): + X = model.ops.flatten(X) for layer in model._layers: for hook in layer.on_data_hooks: hook(layer, X, y) X = layer(X) [email protected]_data(_run_child_hooks) class FunctionLayer(Model): '''Wrap functions into weightless Model instances, for use as network components.'''
Refine the way child hooks are run
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -6,6 +6,10 @@ BASE_PKGS=find_packages('src', exclude=['drp', 'drp.*']) NAMESPACE_PKGS = ['numina.pipelines', 'numina.pipelines.emir'] ALL_PKGS = BASE_PKGS + NAMESPACE_PKGS +# There is a problem installing/uninstalling with pip +# pip will uninstall pyemir AND numina +# this is the bug https://github.com/pypa/pip/issues/355 + setup(name='pyemir', version='0.6.6', author='Sergio Pascual',
Added a note about pip uninstalling pyemir AND numina
py