Dataset columns: diff (string, lengths 139–3.65k), message (string, lengths 8–627), diff_languages (string, 1 class: "py").
diff --git a/tests/qpu/test_dwavesampler.py b/tests/qpu/test_dwavesampler.py
index <HASH>..<HASH> 100644
--- a/tests/qpu/test_dwavesampler.py
+++ b/tests/qpu/test_dwavesampler.py
@@ -149,5 +149,10 @@ class TestClientSelection(unittest.TestCase):
         self.assertEqual(DWaveSampler(client='hybrid').client, hybrid())
 
     def test_base_client(self):
-        self.assertIsInstance(DWaveSampler(client=None).client, Client)
-        self.assertIsInstance(DWaveSampler(client='base').client, Client)
+        # to test 'base' client instantiation offline,
+        # we would need a mock client and a mock solver
+        try:
+            self.assertEqual(type(DWaveSampler(client=None).client), Client)
+            self.assertEqual(type(DWaveSampler(client='base').client), Client)
+        except (ValueError, ConfigFileError):
+            raise unittest.SkipTest("no API token available")
Fix test for base client selection in DWaveSampler
py
diff --git a/wandb/sdk/lib/ipython.py b/wandb/sdk/lib/ipython.py
index <HASH>..<HASH> 100644
--- a/wandb/sdk/lib/ipython.py
+++ b/wandb/sdk/lib/ipython.py
@@ -9,8 +9,8 @@ logger = logging.getLogger(__name__)
 
 TABLE_STYLES = """<style>
-    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }
-    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }
+    table.wandb td:nth-child(1) { padding: 0 10px; text-align: left ; width: auto;} td:nth-child(2) {text-align: left ; width: 100%}
+    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; justify-content: flex-start; width: 100% }
     .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }
 </style>
 """
[WB-<I>] Align summary/history in notebook env (#<I>)

* fix content alignment
py
diff --git a/openpnm/core/Base.py b/openpnm/core/Base.py
index <HASH>..<HASH> 100644
--- a/openpnm/core/Base.py
+++ b/openpnm/core/Base.py
@@ -175,6 +175,17 @@ class Base(dict):
                 prop = item.replace('pore.', '').replace('throat.', '')
                 self.__setitem__(key+'.'+prop, value[item])
             return
+        # Ensure that 'pore.foo.bar' does not exist before creating 'pore.foo'
+        for item in self.keys():
+            if len(item.split('.')) > 2:
+                if key == '.'.join(item.split('.')[:2]):
+                    raise Exception(key + ' is already in use as a subdict')
+        # Ensure that 'pore.foo' does not exist before creating 'pore.foo.bar'
+        if len(key.split('.')) > 2:
+            for item in self.keys():
+                if '.'.join(key.split('.')[:2]) == item:
+                    raise Exception(item + ' is already in use, cannot make ' +
+                                    'a subdict')
         value = sp.array(value, ndmin=1)  # Convert value to an ndarray
Adding checks to prevent naming like 'pore.foo' and 'pore.foo.bar'
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -63,8 +63,8 @@ DEV_REQUIRES = TEST_REQUIRES + DOCS_REQUIRES + (
     # Dev debugging
     'ipython',
-    'ipdb==0.10.3',
-    'ipdbplugin==1.4.5',
+    'ipdb',
+    'ipdbplugin',
 
     # Lint spellchecking, dev only (don't fail CI)
     'flake8-spellcheck==0.12.1',
Don't pin dev debug requirements.
py
diff --git a/salt/netapi/rest_tornado/saltnado.py b/salt/netapi/rest_tornado/saltnado.py
index <HASH>..<HASH> 100644
--- a/salt/netapi/rest_tornado/saltnado.py
+++ b/salt/netapi/rest_tornado/saltnado.py
@@ -941,19 +941,12 @@ class SaltAPIHandler(BaseSaltAPIHandler, SaltClientsMixIn):
         '''
         Disbatch runner client commands
         '''
-        timeout = float(chunk.get('timeout', self.application.opts['timeout']))
-        # set the timeout
-        timeout_obj = tornado.ioloop.IOLoop.current().add_timeout(time.time() + timeout, self.timeout_futures)
-
         f_call = {'args': [chunk['fun'], chunk]}
         pub_data = self.saltclients['runner'](chunk['fun'], chunk)
         tag = pub_data['tag'] + '/ret'
         try:
             event = yield self.application.event_listener.get_event(self, tag=tag)
-            # if we finish in time, cancel the timeout
-            tornado.ioloop.IOLoop.current().remove_timeout(timeout_obj)
-
-            # only return the return data
             raise tornado.gen.Return(event['data']['return'])
         except TimeoutException:
Remove timeout from runner disbatch
py
diff --git a/cmsplugin_zinnia/admin.py b/cmsplugin_zinnia/admin.py
index <HASH>..<HASH> 100644
--- a/cmsplugin_zinnia/admin.py
+++ b/cmsplugin_zinnia/admin.py
@@ -16,7 +16,7 @@ class EntryPlaceholderAdmin(PlaceholderAdminMixin, EntryAdmin):
     EntryPlaceholder Admin
     """
     fieldsets = (
-        (None, {'fields': ('title', 'image', 'status')}),
+        (None, {'fields': (('title', 'status'), 'image')}),
         (_('Content'), {'fields': ('content_placeholder',),
                         'classes': ('plugin-holder',
                                     'plugin-holder-nopage')})) + \
Update the fields in admin according to the new version of Zinnia
py
diff --git a/phono3py/phonon3/__init__.py b/phono3py/phonon3/__init__.py
index <HASH>..<HASH> 100644
--- a/phono3py/phonon3/__init__.py
+++ b/phono3py/phonon3/__init__.py
@@ -44,8 +44,7 @@ from phonopy.harmonic.force_constants import (
     set_translational_invariance,
     set_permutation_symmetry)
 from phonopy.harmonic.displacement import get_least_displacements
-from phonopy.harmonic.displacement import direction_to_displacement as \
-    direction_to_displacement_fc2
+from phonopy.harmonic.displacement import directions_to_displacement_dataset
 from phono3py.version import __version__
 from phono3py.phonon3.imag_self_energy import (get_imag_self_energy,
                                                write_imag_self_energy)
@@ -244,7 +243,7 @@ class Phono3py(object):
             self._phonon_supercell_symmetry,
             is_plusminus=is_plusminus,
             is_diagonal=False)
-        self._phonon_displacement_dataset = direction_to_displacement_fc2(
+        self._phonon_displacement_dataset = directions_to_displacement_dataset(
             phonon_displacement_directions,
             distance,
             self._phonon_supercell)
Follow update of phonopy
py
diff --git a/enocean/protocol/constants.py b/enocean/protocol/constants.py
index <HASH>..<HASH> 100644
--- a/enocean/protocol/constants.py
+++ b/enocean/protocol/constants.py
@@ -22,7 +22,7 @@ class RETURN_CODE(IntEnum):
     OK = 0x00
     ERROR = 0x01
     NOT_SUPPORTED = 0x02
-    WRONG_PARAM = 0x04
+    WRONG_PARAM = 0x03
     OPERATION_DENIED = 0x04
Mistake in RETURN_CODE
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -51,6 +51,7 @@ autodoc_mock_imports = [
     'yaml',
     'xmltodict',
     'wrapt',
+    'netaddr',
 ]
 
 autodoc_default_flags = [
Adding netaddr to docs conf
py
diff --git a/doc/conf.py b/doc/conf.py
index <HASH>..<HASH> 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -51,7 +51,7 @@ copyright = "2015, Atsushi Togo"
 # The short X.Y version.
 version = "2.3"
 # The full version, including alpha/beta/rc tags.
-release = "2.3.0"
+release = "2.3.1"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
Set version <I> of doc/conf.py
py
diff --git a/fontbakery-check-ttf.py b/fontbakery-check-ttf.py
index <HASH>..<HASH> 100755
--- a/fontbakery-check-ttf.py
+++ b/fontbakery-check-ttf.py
@@ -1267,7 +1267,7 @@ def main():
   #-----------------------------------------------
   logging.debug("METADATA.pb fonts 'name' property should be same as font familyname")
   font_familyname = get_name_string(font, NAMEID_FONT_FAMILY_NAME)
-  if fullname == False:
+  if font_familyname == False:
     logging.error("This font lacks a FONT_FAMILY_NAME entry (nameID={}) in the name table.".format(NAMEID_FONT_FAMILY_NAME))
   else:
     if font_familyname != f.name:
fix bug in test: "METADATA.pb fonts 'name' property should be same as font familyname"
py
diff --git a/plugins/oauth/server/providers/base.py b/plugins/oauth/server/providers/base.py
index <HASH>..<HASH> 100644
--- a/plugins/oauth/server/providers/base.py
+++ b/plugins/oauth/server/providers/base.py
@@ -111,8 +111,8 @@ class ProviderBase(model_importer.ModelImporter):
             resp.raise_for_status()
         except requests.HTTPError:
             raise RestException(
-                'Got %s from %s, response="%s".' % (
-                    resp.status_code, kwargs['url'], content
+                'Got %s code from provider, response="%s".' % (
+                    resp.status_code, content
                 ), code=502)
 
         try:
Fix a small security issue in OAuth

This prevents possibly private information from leaking in an error message.
py
diff --git a/icekit/publishing/apps.py b/icekit/publishing/apps.py
index <HASH>..<HASH> 100644
--- a/icekit/publishing/apps.py
+++ b/icekit/publishing/apps.py
@@ -80,6 +80,13 @@ class AppConfig(AppConfig):
         @monkey_patch_override_method(UrlNodeQuerySet)
         def published(self, for_user=None):
             qs = self._single_site()
+            # Avoid filtering to only published items when we are in a draft
+            # context and we know this method is triggered by Fluent (because
+            # the `for_user` is present) because we may actually want to find
+            # and return draft items to priveleged users in this situation.
+            if for_user and is_draft_request_context():
+                return qs
+
             if for_user is not None and for_user.is_staff:
                 pass  # Don't filter by publication date for Staff
             else:
Fix for Fluent-triggered `published` filtering in draft context

When Fluent calls the `published` filter with a specific user and that user is in a DRAFT request context, ignore the fact we are in the `published` method and return draft items since we probably want to render one of them.
py
diff --git a/django_cas_ng/cas.py b/django_cas_ng/cas.py
index <HASH>..<HASH> 100644
--- a/django_cas_ng/cas.py
+++ b/django_cas_ng/cas.py
@@ -33,8 +33,10 @@ class CASClient(object):
 
 class CASClientBase(object):
     def __init__(self, service_url=None, server_url=None,
                  extra_login_params=None, renew=False,
-                 username_attribute=None):
+                 username_attribute=None, proxy_callback=None):
+        if proxy_callback:
+            raise ValueError('Proxy callback not supported by this CASClient')
         self.service_url = service_url
         self.server_url = server_url
         self.extra_login_params = extra_login_params or {}
@@ -287,12 +289,12 @@ class CASClientWithSAMLV1(CASClientBase):
         saml_validate_url = urllib_parse.urljoin(
             self.server_url, 'samlValidate',
         )
-        url = Request(
+        request = Request(
             saml_validate_url + '?' + urllib_parse.urlencode(params),
-            '',
+            self.get_saml_assertion(ticket),
             headers,
         )
-        page = urlopen(url, data=self.get_saml_assertion(ticket))
+        page = urlopen(request)
         return page
Add missing proxy_callback to CASClientBase

Caused errors whenever CASClientWithSAMLV1 was constructed.
py
diff --git a/assess_log_rotation.py b/assess_log_rotation.py
index <HASH>..<HASH> 100755
--- a/assess_log_rotation.py
+++ b/assess_log_rotation.py
@@ -7,6 +7,7 @@ from argparse import ArgumentParser
 from datetime import datetime
 import logging
 import re
+import sys
 
 from deploy_stack import (
     dump_env_logs,
@@ -200,6 +201,7 @@ def main():
         args.juju_path, args.debug, args.env_name, args.temp_env_name)
     client.destroy_environment()
     juju_home = get_juju_home()
+    bootstrap_host = None
     try:
         with temp_bootstrap_env(juju_home, client):
             client.bootstrap()
@@ -220,6 +222,7 @@ def main():
     except Exception as e:
         print_now("exception while dumping logs:\n")
         logging.exception(e)
+        sys.exit(1)
     finally:
         client.destroy_environment()
Return non-zero on failure, default bootstrap_host to None.
py
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -38,6 +38,7 @@ if os.environ.get('READTHEDOCS', 0):
     mockapi.library = Mock('wand.api.library')
     mockapi.libmagick = Mock('wand.api.libmagick')
     mockapi.libc = Mock('wand.api.libc')
+    sys.modules['wand'].image = sys.modules['wand.image'] = Mock('wand.image')
 
 # -- General configuration -----------------------------------------------------
Mock wand.image as well
py
diff --git a/test/test_email.py b/test/test_email.py
index <HASH>..<HASH> 100644
--- a/test/test_email.py
+++ b/test/test_email.py
@@ -64,4 +64,4 @@ class TestEmailObject(unittest.TestCase):
         name = "Name, Some"
         email.name = name
 
-        self.assertEqual(email.name, name)
+        self.assertEqual(email.name, '"' + name + '"')
Add quotes around expected value in new test
py
diff --git a/stagpy/stagyydata.py b/stagpy/stagyydata.py
index <HASH>..<HASH> 100644
--- a/stagpy/stagyydata.py
+++ b/stagpy/stagyydata.py
@@ -363,6 +363,7 @@ class _Steps(dict):
 
     def __init__(self, sdat):
         self.sdat = sdat
+        self._last = UNDETERMINED
         super().__init__()
 
     def __setitem__(self, key, value):
@@ -392,6 +393,13 @@ class _Steps(dict):
             super().__setitem__(istep, _Step(istep, self.sdat))
         return super().__getitem__(istep)
 
+    @property
+    def last(self):
+        if self._last is UNDETERMINED:
+            # not necessarily the last one...
+            self._last = self.tseries[-1, 0]
+        return self._last
+
 
 class _Snaps(_Steps):
 
@@ -429,6 +437,17 @@ class _Snaps(_Steps):
             self._isteps.insert(isnap, istep)
             self.sdat.steps[istep].isnap = isnap
 
+    @property
+    def last(self):
+        if self._last is UNDETERMINED:
+            self._last = None
+            for isnap in range(99999, -1, -1):
+                istep = self[isnap].istep
+                if istep is not None:
+                    self._last = isnap
+                    break
+        return self._last
+
 
 class StagyyData:
Add `last` property to _Steps and _Snaps classes

It looks for the last available time step/snapshot. This will be useful to handle negative istep/isnap.
py
diff --git a/ceam_tests/util.py b/ceam_tests/util.py
index <HASH>..<HASH> 100644
--- a/ceam_tests/util.py
+++ b/ceam_tests/util.py
@@ -153,3 +153,11 @@ def generate_test_population(event):
 
     event.population_view.update(population)
+
+
+def make_dummy_column(name, initial_value):
+    @listens_for('initialize_simulants')
+    @uses_columns([name])
+    def make_column(event):
+        event.population_view.update(pd.Series(initial_value, index=event.index, name=name))
+    return make_column
+
Added make_dummy_column to ceam_tests.util
py
diff --git a/telethon/client/auth.py b/telethon/client/auth.py
index <HASH>..<HASH> 100644
--- a/telethon/client/auth.py
+++ b/telethon/client/auth.py
@@ -194,7 +194,7 @@ class AuthMethods(MessageParseMethods, UserMethods):
         return self
 
     async def sign_in(
-            self, phone=None, *, code=None, password=None,
+            self, phone=None, code=None, *, password=None,
             bot_token=None, phone_code_hash=None):
         """
         Starts or completes the sign in process with the given phone number
Revert sign_in needing named code argument
py
diff --git a/qualysapi/util.py b/qualysapi/util.py
index <HASH>..<HASH> 100644
--- a/qualysapi/util.py
+++ b/qualysapi/util.py
@@ -5,6 +5,8 @@ import qualysapi.config as qcconf
 import qualysapi.connector as qcconn
 import qualysapi.settings as qcs
 
+from urllib.parse import quote_plus
+
 
 __author__ = "Parag Baxi <[email protected]> & Colin Bell <[email protected]>"
 __copyright__ = "Copyright 2011-2013, Parag Baxi & University of Waterloo"
@@ -32,7 +34,7 @@ def connect(
     # Use function parameter login credentials.
     if username and password:
         connect = qcconn.QGConnector(
-            auth=(username, password), server=hostname, max_retries=max_retries, proxies=proxies
+            auth=(username, quote_plus(password)), server=hostname, max_retries=max_retries, proxies=proxies
         )
     # Retrieve login credentials from config file.
fix special characters in password

Applications using the Qualys API module return an error when the password has special characters like % or +; urllib.parse.quote_plus is a fix for such issues.
py
diff --git a/wandb/run_manager.py b/wandb/run_manager.py
index <HASH>..<HASH> 100644
--- a/wandb/run_manager.py
+++ b/wandb/run_manager.py
@@ -1170,7 +1170,10 @@ class RunManager(object):
         # and unconditionally start the status checker.
         if not self._agent_run:
             def stop_handler():
-                self.proc.interrupt()
+                if isinstance(self.proc, Process):
+                    self.proc.interrupt()
+                else:
+                    self.proc.send_signal(signal.SIGINT)
 
             self._run_status_checker = RunStatusChecker(
                 self._run, self._api, stop_requested_handler=stop_handler)
Handle case where run_manager.proc is a Popen object.
py
diff --git a/tests/test_client.py b/tests/test_client.py
index <HASH>..<HASH> 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -12,6 +12,7 @@ except ImportError:
 import unittest
 
 from algoliasearch.client import Client, MAX_API_KEY_LENGTH
+from algoliasearch.helpers import AlgoliaException
 from .helpers import safe_index_name
 from .helpers import get_api_client
 
@@ -123,20 +124,16 @@ class ClientNoDataOperationsTest(ClientTest):
 
     def test_dns_timeout(self):
         app_id = os.environ['ALGOLIA_APPLICATION_ID']
-        hosts = [
-            '%s-dsn.algolia.biz' % app_id,
-            '%s-dsn.algolia.net' % app_id,
-            '%s-1.algolianet.com' % app_id,
-            '%s-2.algolianet.com' % app_id,
-            '%s-3.algolianet.com' % app_id,
-        ]
-
+        hosts = ['algolia.biz']
         client = Client(app_id, os.environ['ALGOLIA_API_KEY'], hosts)
         client.set_timeout(5, 2)
 
         now = time.time()
-        indices = client.list_indexes()
-        self.assertLess(now + 5, time.time())
+        try:
+            indices = client.list_indexes()
+        except AlgoliaException:
+            pass
+        self.assertLess(time.time(), now + 6)
 
 
 class ClientWithDataTest(ClientTest):
Fix the test on the DNS timeout for it to legitimately fail
py
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py
index <HASH>..<HASH> 100644
--- a/jax/numpy/lax_numpy.py
+++ b/jax/numpy/lax_numpy.py
@@ -2343,11 +2343,7 @@ def _static_idx(idx, size):
   """Helper function to compute the static slice start/limit/stride values."""
   assert isinstance(idx, slice)
   start, stop, step = idx.indices(size)
-  if step < 0:
-    length = (start - stop - 1) // (-step) + 1 if stop < start else 0
-  else:
-    length = (stop - start - 1) // step + 1 if start < stop else 0
-  if length == 0:
+  if (step < 0 and stop >= start) or (step > 0 and start >= stop):
     return 0, 0, 1, False  # sliced to size zero
 
   if step > 0:
Don't explicitly compute the length; we only need to know if the interval is empty.
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,6 @@ from distutils.dir_util import remove_tree
 from distutils.spawn import spawn
 import os
 import sys
-import shutil
 
 if os.environ.get('READTHEDOCS', None) == 'True':
     sys.exit("setup.py disabled on readthedocs: called with %s"
remove unused import

The `shutil` import is no longer needed.
py
diff --git a/graphene/types/mutation.py b/graphene/types/mutation.py
index <HASH>..<HASH> 100644
--- a/graphene/types/mutation.py
+++ b/graphene/types/mutation.py
@@ -76,7 +76,12 @@ class Mutation(ObjectType):
         super(Mutation, cls).__init_subclass_with_meta__(_meta=_meta, **options)
 
     @classmethod
-    def Field(cls, *args, **kwargs):
+    def Field(cls, name=None, description=None, deprecation_reason=None):
         return Field(
-            cls._meta.output, args=cls._meta.arguments, resolver=cls._meta.resolver
+            cls._meta.output,
+            args=cls._meta.arguments,
+            resolver=cls._meta.resolver,
+            name=name,
+            description=description,
+            deprecation_reason=deprecation_reason,
         )
Enabled possibility of setting name, description or deprecation_reason in mutation Fields

Fixed #<I>, $<I> #<I> and #<I>
py
diff --git a/numina/array/fwhm.py b/numina/array/fwhm.py
index <HASH>..<HASH> 100644
--- a/numina/array/fwhm.py
+++ b/numina/array/fwhm.py
@@ -50,14 +50,19 @@ def compute_fwhm_2d_simple(img, xc, yc):
 
 
 def compute_fwhm_1d_simple(Y, xc, X=None):
+    '''Compute the FWHM.'''
+    return compute_fw_at_frac_max_1d_simple(Y, xc, X=X, f=0.5)
+
+
+def compute_fw_at_frac_max_1d_simple(Y, xc, X=None, f=0.5):
+    '''Compute the full width at fraction f of the maximum'''
 
     if X is None:
         X = range(Y.shape[0])
 
     xpix = wc_to_pix_1d(xc - X[0])
     peak = Y[xpix]
-    fwhm_x, _codex, _msgx = compute_fwhm_1d(X, Y - 0.5 * peak, xc, xpix)
+    fwhm_x, _codex, _msgx = compute_fwhm_1d(X, Y - f * peak, xc, xpix)
 
     return peak, fwhm_x
Compute full width at a fraction of the peak (not only half)
py
diff --git a/assemblerflow/templates/metaspades.py b/assemblerflow/templates/metaspades.py
index <HASH>..<HASH> 100644
--- a/assemblerflow/templates/metaspades.py
+++ b/assemblerflow/templates/metaspades.py
@@ -208,8 +208,8 @@ def main(sample_id, fastq_pair, max_len, kmer):
     # Get spades version for output name
     info = __get_version_spades()
-    assembly_file = "{}_metaspades{}.fasta".format(
-        sample_id, info["version"].replace(".", ""))
+    assembly_file = "{}_metaspades.fasta".format(
+        sample_id)
     os.rename("contigs.fasta", assembly_file)
     logger.info("Setting main assembly file to: {}".format(assembly_file))
metaspades.py: changed output file name (missing version - removed due to not being correctly parsed)
py
diff --git a/scout/server/config.py b/scout/server/config.py
index <HASH>..<HASH> 100644
--- a/scout/server/config.py
+++ b/scout/server/config.py
@@ -22,8 +22,3 @@ ACCEPT_LANGUAGES = ['en', 'sv']
 
 # FEATURE FLAGS
 SHOW_CAUSATIVES = False
-
-#MatchMaker related parameters
-MME_ACCEPTS = 'application/vnd.ga4gh.matchmaker.v1.0+json'
-MME_URL = 'http://localhost:9020'
-MME_TOKEN = 'custom_token'
no need for test MME params in config file here
py
diff --git a/bibliopixel/main/main.py b/bibliopixel/main/main.py
index <HASH>..<HASH> 100644
--- a/bibliopixel/main/main.py
+++ b/bibliopixel/main/main.py
@@ -23,7 +23,14 @@ def get_args(argv=sys.argv):
     if not argv:
         return
 
-    if argv and not argv[0].isidentifier() and '-h' not in argv:
+    # Move all the flags to the end.
+    args = [], []
+    for a in argv:
+        args[a.startswith('-')].append(a)
+
+    argv = args[0] + args[1]
+
+    if not argv[0].isidentifier() and '-h' not in argv:
         # The first argument can't be a command so try to run it.
         argv.insert(0, 'run')
Fix flags to `bp command` (and fix #<I>)
py
diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py
index <HASH>..<HASH> 100644
--- a/test/test_srv_polling.py
+++ b/test/test_srv_polling.py
@@ -88,6 +88,7 @@ class TestSrvPolling(unittest.TestCase):
 
     CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc"
 
+    @unittest.skipIf(sys.version_info[0] < 3, "PYTHON-2002 fails on python 2")
     def setUp(self):
         if not _HAVE_DNSPYTHON:
             raise unittest.SkipTest("SRV polling tests require the dnspython "
PYTHON-<I> Skip failing dnspython SRV polling tests on Python 2
py
diff --git a/src/foremast/utils/apps.py b/src/foremast/utils/apps.py
index <HASH>..<HASH> 100644
--- a/src/foremast/utils/apps.py
+++ b/src/foremast/utils/apps.py
@@ -48,7 +48,7 @@ def get_all_apps():
     return pipelines
 
 
-def get_details(app='groupproject', env='dev'):
+def get_details(app='groupproject', env='dev', region='us-east-1'):
     """Extract details for Application.
 
     Args:
@@ -75,7 +75,7 @@ def get_details(app='groupproject', env='dev'):
     group = app_details['attributes'].get('repoProjectKey')
     project = app_details['attributes'].get('repoSlug')
     generated = gogoutils.Generator(group, project, env=env,
-                                    formats=APP_FORMATS)
+                                    region=region, formats=APP_FORMATS)
 
     LOG.debug('Application details: %s', generated)
     return generated
added regions to gogoutils generator
py
diff --git a/tweepy/cache.py b/tweepy/cache.py
index <HASH>..<HASH> 100644
--- a/tweepy/cache.py
+++ b/tweepy/cache.py
@@ -8,7 +8,6 @@ import time
 import threading
 import os
 import hashlib
-import fcntl
 import cPickle as pickle
 
 try:
Removed failing import of 'fcntl'
py
diff --git a/stutils/email_utils.py b/stutils/email_utils.py
index <HASH>..<HASH> 100644
--- a/stutils/email_utils.py
+++ b/stutils/email_utils.py
@@ -18,15 +18,19 @@ def parse(raw_email):
     >>> parse("John Doe <[email protected]")
     ('me', 'someorg.com')
-    >>> parse(42)
+    >>> parse(42)  # doctest: +IGNORE_EXCEPTION_DETAIL
     Traceback (most recent call last):
         ...
-    InvalidEmail
+    InvalidEmail: 'Invalid email: 42'
+    >>> parse(None)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+        ...
+    InvalidEmail: 'None or NaN is not a valid email address'
     """
     if not isinstance(raw_email, six.string_types):
-        raise InvalidEmail
+        raise InvalidEmail("Invalid email: %s" % raw_email)
     if not raw_email or pd.isnull(raw_email):
-        raise InvalidEmail
+        raise InvalidEmail("None or NaN is not a valid email address")
     email = raw_email.split("<", 1)[-1].split(">", 1)[0]
     chunks = email.split("@", 3)
     # git-svn generates emails with several @, e.g.:
fix(tests): make doctests Python3 compatible
py
diff --git a/salt/master.py b/salt/master.py
index <HASH>..<HASH> 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -13,11 +13,15 @@ import hashlib
 import tempfile
 import datetime
 import multiprocessing
+import subprocess
 
 # Import zeromq
 import zmq
 from M2Crypto import RSA
 
+# Import Third Party Libs
+import yaml
+
 # Import salt modules
 import salt.crypt
 import salt.utils
@@ -378,12 +382,12 @@ class AESFuncs(object):
         specified
         '''
         if not 'id' in load:
-            log.error('Recieved call for external nodes without an id')
+            log.error('Received call for external nodes without an id')
             return {}
         if not self.opts['external_nodes']:
             return {}
         if not salt.utils.which(self.opts['external_nodes']):
-            log.erorr(('Specified external nodes controller {0} is not'
+            log.error(('Specified external nodes controller {0} is not'
                        ' available, please verify that it is installed'
                        '').format(self.opts['external_nodes']))
             return {}
Repair imports and spelling on ext_nodes
py
diff --git a/lib/autokey/iomediator/_iomediator.py b/lib/autokey/iomediator/_iomediator.py
index <HASH>..<HASH> 100644
--- a/lib/autokey/iomediator/_iomediator.py
+++ b/lib/autokey/iomediator/_iomediator.py
@@ -58,7 +58,7 @@ class IoMediator(threading.Thread):
     def shutdown(self):
        _logger.debug("IoMediator shutting down")
        self.interface.cancel()
-        self.queue.put_nowait((None, None, None))
+        self.queue.put_nowait((None, None))
        _logger.debug("Waiting for IoMediator thread to end")
        self.join()
        _logger.debug("IoMediator shutdown completed")
@@ -101,7 +101,7 @@ class IoMediator(threading.Thread):
     def run(self):
         while True:
             keyCode, window_info = self.queue.get()
-            if keyCode is None and window_info.wm_title is None:
+            if keyCode is None and window_info is None:
                 break
 
             numLock = self.modifiers[Key.NUMLOCK]
Fixed thrown Exception in the shutdown logic.

Bug was merged into master in commit <I>b<I>e<I>a<I>b<I>e<I>a9fade<I>cfb<I>c1
py
diff --git a/mutagen/id3.py b/mutagen/id3.py
index <HASH>..<HASH> 100644
--- a/mutagen/id3.py
+++ b/mutagen/id3.py
@@ -856,14 +856,14 @@ class MCDI(Frame):
 # class SYTC: unsupported
 # class USLT: unsupported
 # class SYLT: unsupported
-#     HashKey = property(lambda s: '%s:%r:%s'%(s.FrameID, s.lang, s.desc))
+#     HashKey = property(lambda s: '%s:%s:%r'%(s.FrameID, s.desc, s.lang))
 
 class COMM(TextFrame):
     "User comment"
     _framespec = [ EncodingSpec('encoding'), StringSpec('lang', 3),
         EncodedTextSpec('desc'),
         MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000') ]
-    HashKey = property(lambda s: '%s:%r:%s'%(s.FrameID, s.lang, s.desc))
+    HashKey = property(lambda s: '%s:%s:%r'%(s.FrameID, s.desc, s.lang))
 
 class RVA2(Frame):
     "Relative volume adjustment (2)"
COMM: Swap lang/desc order in HashKey.
py
diff --git a/tests/integration/py2/nupic/opf/expgenerator_test.py b/tests/integration/py2/nupic/opf/expgenerator_test.py
index <HASH>..<HASH> 100755
--- a/tests/integration/py2/nupic/opf/expgenerator_test.py
+++ b/tests/integration/py2/nupic/opf/expgenerator_test.py
@@ -117,8 +117,11 @@ class ExperimentTestBaseClass(HelperTestCaseBase):
     raised by this method will be considered an error rather than a test
     failure. The default implementation does nothing.
     """
-
-    pass
+    global g_myEnv
+    if not g_myEnv:
+      # Setup environment
+      params = type('obj', (object,), {'installDir' : os.environ['NTA']})
+      g_myEnv = MyTestEnvironment(params)
 
   def tearDown(self):
Fixed running of expgenerator_test with py.test
py
diff --git a/examples/mysql_example/mysql_example.py b/examples/mysql_example/mysql_example.py
index <HASH>..<HASH> 100644
--- a/examples/mysql_example/mysql_example.py
+++ b/examples/mysql_example/mysql_example.py
@@ -9,8 +9,9 @@ With such a large set of input data, we cannot store all the comparisons
 we need to make in memory. Instead, we will read the pairs on demand
 from the MySQL database.
 
-__Note:__ You will need to run `python
-examples/mysql_example/mysql_init_db.py` before running this script.
+__Note:__ You will need to run `python examples/mysql_example/mysql_init_db.py`
+before running this script. See the annotates source for
+[mysql_init_db](http://open-city.github.com/dedupe/doc/mysql_init_db.html)
 
 For smaller datasets (<10,000), see our
 [csv_example](http://open-city.github.com/dedupe/doc/csv_example.html)
added link to annotated mysql_init_db
py
diff --git a/pymbar/timeseries.py b/pymbar/timeseries.py
index <HASH>..<HASH> 100644
--- a/pymbar/timeseries.py
+++ b/pymbar/timeseries.py
@@ -775,7 +775,7 @@ def detectEquilibration(A_t, fast=True, nskip=1):
     return (t, g, Neff_max)
 
 
-def statisticalInefficiency_fft(A_n, mintime=3, memsafe=False):
+def statisticalInefficiency_fft(A_n, mintime=3, memsafe=True):
     """Compute the (cross) statistical inefficiency of (two) timeseries.
 
     Parameters
set memsafe=True by default
py
diff --git a/spyderlib/widgets/ipython.py b/spyderlib/widgets/ipython.py
index <HASH>..<HASH> 100644
--- a/spyderlib/widgets/ipython.py
+++ b/spyderlib/widgets/ipython.py
@@ -557,6 +557,8 @@ class IPythonClient(QWidget, SaveHistoryMixin):
     def _create_loading_page(self):
         loading_template = Template(LOADING)
         loading_img = get_image_path('loading_sprites.png')
+        if os.name == 'nt':
+            loading_img = loading_img.replace('\\', '/')
         message = _("Connecting to kernel...")
         page = loading_template.substitute(css_path=CSS_PATH,
                                            loading_img=loading_img,
IPython Console: Fix javascript alert on Windows about not being able to load the throbber image
py
diff --git a/tweepy/api.py b/tweepy/api.py
index <HASH>..<HASH> 100644
--- a/tweepy/api.py
+++ b/tweepy/api.py
@@ -16,7 +16,7 @@ class API(object):
 
     def __init__(self, auth_handler=None,
             host='api.twitter.com', search_host='search.twitter.com',
-            cache=None, secure=False, api_root='/1', search_root='',
+            cache=None, secure=True, api_root='/1.1', search_root='',
             retry_count=0, retry_delay=0, retry_errors=None,
             parser=None):
         self.auth = auth_handler
Use Twitter <I> and use secure by default.
py
diff --git a/tools/scheduler.py b/tools/scheduler.py
index <HASH>..<HASH> 100644
--- a/tools/scheduler.py
+++ b/tools/scheduler.py
@@ -437,7 +437,8 @@ class MPIScheduler(SubmitScheduler):
     def try_to_start_mpi(self, command, tasks, items):
         if self.p:
-            self.p.kill()
+            try: self.p.kill()
+            except: pass
         hosts = ','.join("%s:%d" % (hostname, slots) for hostname, slots in items)
         logging.debug("choosed hosts: %s", hosts)
         cmd = ['mpirun', '-prepend-rank', '-launcher', 'none', '-hosts', hosts, '-np', str(tasks)] + command
@@ -467,8 +468,10 @@ class MPIScheduler(SubmitScheduler):
 
     @safe
     def stop(self, status):
         if self.started:
-            self.p.kill()
-            self.p.wait()
+            try:
+                self.p.kill()
+                self.p.wait()
+            except: pass
         self.tout.join()
         super(MPIScheduler, self).stop(status)
catch exception when mpirun has exited.
py
diff --git a/volapi/volapi.py b/volapi/volapi.py
index <HASH>..<HASH> 100644
--- a/volapi/volapi.py
+++ b/volapi/volapi.py
@@ -539,7 +539,7 @@ class Room:
 
             options = data['options']
             admin = 'admin' in options or 'staff' in options
-            user = 'user' in options or admin
+            user = ('user' in options or admin) and 'profile' in options
             chat_message = ChatMessage(data["nick"],
                                        msg,
                                        files=files,
MOTD is not actually a user
py
diff --git a/shap/explainers/_explainer.py b/shap/explainers/_explainer.py
index <HASH>..<HASH> 100644
--- a/shap/explainers/_explainer.py
+++ b/shap/explainers/_explainer.py
@@ -73,7 +73,11 @@ class Explainer():
             else:
                 self.masker = maskers.Independent(masker)
         elif safe_isinstance(masker, ["transformers.PreTrainedTokenizer", "transformers.tokenization_utils_base.PreTrainedTokenizerBase"]):
-            self.masker = maskers.Text(masker)
+            if safe_isinstance(self.model, "transformers.PreTrainedModel") and safe_isinstance(self.model, MODELS_FOR_SEQ_TO_SEQ_CAUSAL_LM + MODELS_FOR_CAUSAL_LM):
+                # auto assign text infilling if model is a transformer model with lm head
+                self.masker = maskers.Text(masker, mask_token="<infill>")
+            else:
+                self.masker = maskers.Text(masker)
         elif (masker is list or masker is tuple) and masker[0] is not str:
             self.masker = maskers.Composite(*masker)
         elif (masker is dict) and ("mean" in masker):
Added auto assignment of mask token for transformer model with lm head for text infilling
py
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index <HASH>..<HASH> 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -2072,7 +2072,9 @@ def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,str
 def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
 
     img1 = np.random.random(data_shape)
+    img1 = img1.astype(np.float32)
     img2 = np.random.random(data_shape)
+    img2 = img2.astype(np.float32)
 
     net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
     net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
Force the dtype of data to be float<I> for test_correlation (#<I>)
py
diff --git a/ckeditor_filebrowser_filer/views.py b/ckeditor_filebrowser_filer/views.py
index <HASH>..<HASH> 100644
--- a/ckeditor_filebrowser_filer/views.py
+++ b/ckeditor_filebrowser_filer/views.py
@@ -39,7 +39,10 @@ def url_image(request, image_id, thumb_options=None, width=None, height=None):
     :return: JSON serialized URL components ('url', 'width', 'height')
     """
     image = Image.objects.get(pk=image_id)
-    url = image.url
+    if getattr(image, 'canonical_url'):
+        url = image.canonical_url
+    else:
+        url = image.url
     thumbnail_options = {}
     if thumb_options is not None:
         thumbnail_options = ThumbnailOption.objects.get(pk=thumb_options).as_dict
Add canonical url if present
py
diff --git a/testing/test_python.py b/testing/test_python.py
index <HASH>..<HASH> 100644
--- a/testing/test_python.py
+++ b/testing/test_python.py
@@ -1664,6 +1664,24 @@ class TestFuncargFactory:
             "*2 passed*"
         ])
 
+    @pytest.mark.xfail(reason="factorydef passed to tw.line")
+    def test_factory_uses_unknown_funcarg_error(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+
+            @pytest.factory(scope='session')
+            def arg1(missing):
+                return
+
+            def test_missing(arg1):
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "*LookupError: no factory found for argument 'missing'"
+        ])
+
+
 class TestResourceIntegrationFunctional:
     def test_parametrize_with_ids(self, testdir):
add xfailing test for issue <I>
py
diff --git a/txkoji/task.py b/txkoji/task.py
index <HASH>..<HASH> 100644
--- a/txkoji/task.py
+++ b/txkoji/task.py
@@ -131,6 +131,7 @@ class Task(Munch):
         """
         # (I wish there was a better way to do this.)
         source = self.params[0]
+        o = urlparse(source)
         # build tasks can load an SRPM from a "cli-build" tmpdir:
         if source.endswith('.src.rpm'):
             srpm = os.path.basename(source)
@@ -139,8 +140,7 @@ class Task(Munch):
             # useful eventually, maybe in a "Package" class.
             return name
         # or an allowed SCM:
-        elif source.startswith('git://'):
-            o = urlparse(source)
+        elif o.scheme:
             package = os.path.basename(o.path)
             if package.endswith('.git'):
                 return package[:-4]
task: handle arbitrary SCM schemes

Prior to this change we would only parse a task's SCM source if the URL started with "git://". In Fedora's Koji they use "git+https://", so this doesn't work. As long as the urlparse() method can determine any scheme here, we should treat the value as a valid URL and attempt to parse the .path attribute.
py
diff --git a/gwpy/timeseries/timeseries.py b/gwpy/timeseries/timeseries.py
index <HASH>..<HASH> 100644
--- a/gwpy/timeseries/timeseries.py
+++ b/gwpy/timeseries/timeseries.py
@@ -434,7 +434,7 @@ class TimeSeries(TimeSeriesBase):
         # calculate PSD using UI method
         return fft_ui.average_spectrogram(self, method_func, stride,
                                           fftlength=fftlength, overlap=overlap,
-                                          **kwargs)
+                                          window=window, **kwargs)
 
     def spectrogram2(self, fftlength, overlap=0, **kwargs):
         """Calculate the non-averaged power `Spectrogram` of this `TimeSeries`
TimeSeries.spectrogram: fixed missing kwarg

need to actually pass on the window function
py
diff --git a/dipper/models/Genotype.py b/dipper/models/Genotype.py
index <HASH>..<HASH> 100644
--- a/dipper/models/Genotype.py
+++ b/dipper/models/Genotype.py
@@ -383,9 +383,15 @@ class Genotype():
 
         """
 
-        if part_relationship is None:
+        # Fail loudly if parent or child identifiers are None
+        if parent_id is None:
+            raise TypeError('Attempt to pass None as parent')
+        elif part_id is None:
+            raise TypeError('Attempt to pass None as child')
+        elif part_relationship is None:
             part_relationship = self.properties['has_part']
+
         self.graph.addTriple(parent_id, part_relationship, part_id)
 
         return
fail hard if None is propagating (force upstream fix)
py
diff --git a/bcbio/variation/prioritize.py b/bcbio/variation/prioritize.py
index <HASH>..<HASH> 100644
--- a/bcbio/variation/prioritize.py
+++ b/bcbio/variation/prioritize.py
@@ -52,7 +52,7 @@ def _apply_priority_filter(in_file, priority_file, data):
                 out_handle.write(header)
             if "tumoronly_germline_filter" in dd.get_tools_on(data):
                 filter_cmd = ("bcftools filter -m '+' -s 'LowPriority' "
-                              """-e "EPR[*] != 'pass'" |""")
+                              """-e "EPR[0] != 'pass'" |""")
             else:
                 filter_cmd = ""
             cmd = ("bcftools annotate -a {priority_file} -h {header_file} "
Tumor-only prioritization: do not filter inputs with COSMIC support

When we had known database support of a variant (`EPR=pass,cosmic`) this would result in filtering due to the second item not also being pass. bcftools uses `or` instead of `and` logic to check multiple fields: <URL>
py
diff --git a/python/herald/shell.py b/python/herald/shell.py
index <HASH>..<HASH> 100644
--- a/python/herald/shell.py
+++ b/python/herald/shell.py
@@ -92,14 +92,14 @@ class HeraldCommands(object):
         """
         Post a message to the given peer.
         """
-        def callback(herald, message):
+        def callback(_, message):
             """
             Received a reply
             """
             io_handler.write_line("Got answer to {0}:", message.reply_to)
             io_handler.write_line(message.content)
 
-        def errback(herald, exception):
+        def errback(_, exception):
             """
             Error during message transmission
             """
Correction according to PyLint

- Avoid using "herald" as parameter name (name collision)
py
diff --git a/holoviews/plotting/plot.py b/holoviews/plotting/plot.py
index <HASH>..<HASH> 100644
--- a/holoviews/plotting/plot.py
+++ b/holoviews/plotting/plot.py
@@ -376,7 +376,7 @@ class CompositePlot(Plot):
         for path, item in self.layout.items():
             if self.uniform:
                 dim_keys = zip([d.name for d in self.dimensions
-                                if d in item.key_dimensions], key)
+                                if d in item.dimensions('key')], key)
             else:
                 dim_keys = item.traverse(nthkey_fn, ('HoloMap',))[0]
             if dim_keys:
Fix to CompositePlot.get_frame method

Fixes per frame normalization across a Layout
py
diff --git a/anoncreds/__metadata__.py b/anoncreds/__metadata__.py
index <HASH>..<HASH> 100644
--- a/anoncreds/__metadata__.py
+++ b/anoncreds/__metadata__.py
@@ -1,7 +1,7 @@
 """
 Package metadata
 """
-__version_info__ = (0, 1, 2)
+__version_info__ = (0, 1, 3)
 __version__ = '{}.{}.{}'.format(*__version_info__)
 __author__ = "Evernym, Inc."
 __license__ = "Apache 2.0"
advanced version to push to pypi
py
diff --git a/pyrogram/__init__.py b/pyrogram/__init__.py
index <HASH>..<HASH> 100644
--- a/pyrogram/__init__.py
+++ b/pyrogram/__init__.py
@@ -16,7 +16,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with Pyrogram.  If not, see <http://www.gnu.org/licenses/>.
 
-__version__ = "2.0.45"
+__version__ = "2.0.46"
 __license__ = "GNU Lesser General Public License v3.0 (LGPL-3.0)"
 __copyright__ = "Copyright (C) 2017-present Dan <https://github.com/delivrance>"
Update Pyrogram to <I>
py
diff --git a/atrcopy/__init__.py b/atrcopy/__init__.py
index <HASH>..<HASH> 100644
--- a/atrcopy/__init__.py
+++ b/atrcopy/__init__.py
@@ -66,7 +66,6 @@ def find_diskimage(filename):
             continue
         if options.verbose:
             print "Found parser %s" % parser.menu_name
-            print "%s: %s" % (filename, parser.image)
         break
     if parser is None:
         print "%s: Unknown disk image type" % filename
@@ -352,6 +351,9 @@ def run():
         "segments": [],
     }
     reverse_aliases = {z: k for k, v in command_aliases.iteritems() for z in v}
+
+    skip_diskimage_summary = set(["crc"])
+
     usage = "%(prog)s [-h] [-v] [--dry-run] DISK_IMAGE [...]"
     subparser_usage = "%(prog)s [-h] [-v] [--dry-run] DISK_IMAGE"
@@ -483,6 +485,8 @@ def run():
             else:
                 parser = find_diskimage(disk_image_name)
                 if parser and parser.image:
+                    if command not in skip_diskimage_summary:
+                        print "%s: %s" % (disk_image_name, parser.image)
                     if command == "vtoc":
                         vtoc = parser.image.get_vtoc_object()
                         print vtoc
Skip the disk image summary print on crc command
py
diff --git a/suds/sax/date.py b/suds/sax/date.py
index <HASH>..<HASH> 100644
--- a/suds/sax/date.py
+++ b/suds/sax/date.py
@@ -256,8 +256,7 @@ class FixedOffsetTimezone(datetime.tzinfo, UnicodeMixin):
             total_seconds = self.__offset.total_seconds()
         else:
             total_seconds = (self.__offset.days * 24 * 60 * 60) + \
-                (self.__offset.seconds) + \
-                (self.__offset.microseconds / 1000000.0)
+                (self.__offset.seconds)
 
         hours = total_seconds // (60 * 60)
         total_seconds -= hours * 60 * 60
simplify FixedOffsetTimezone name calculation

Such timezones may not have offsets with more than minute precision so we can freely assume the offset's microseconds attribute is always 0.
py
diff --git a/tests/test_acf.py b/tests/test_acf.py
index <HASH>..<HASH> 100644
--- a/tests/test_acf.py
+++ b/tests/test_acf.py
@@ -1,9 +1,9 @@
 import io
+import os
 
 import pytest
 from steamfiles import acf
 
-test_file_name = 'tests/test_data/appmanifest_202970.acf'
-
+test_file_name = os.path.join(os.path.dirname(__file__), 'test_data/appmanifest_202970.acf')
 
 @pytest.yield_fixture
 def acf_data():
Fix relative path not working properly <I>% of the time…
py
diff --git a/dvc/version.py b/dvc/version.py
index <HASH>..<HASH> 100644
--- a/dvc/version.py
+++ b/dvc/version.py
@@ -6,7 +6,7 @@ import os
 import subprocess
 
 
-_BASE_VERSION = "1.0.0b0"
+_BASE_VERSION = "1.0.0b1"
 
 
 def _generate_version(base_version):
dvc: bump to <I>b1
py
diff --git a/raiden_contracts/deploy/__main__.py b/raiden_contracts/deploy/__main__.py
index <HASH>..<HASH> 100644
--- a/raiden_contracts/deploy/__main__.py
+++ b/raiden_contracts/deploy/__main__.py
@@ -102,7 +102,7 @@ def setup_ctx(
     web3 = Web3(HTTPProvider(rpc_provider, request_kwargs={"timeout": 60}))
     web3.middleware_stack.inject(geth_poa_middleware, layer=0)
     print("Web3 provider is", web3.providers[0])
-    private_key_string = get_private_key(Path(private_key))
+    private_key_string = get_private_key(Path(private_key).expanduser())
    if not private_key_string:
        raise RuntimeError("Could not access the private key.")
    owner = private_key_to_address(private_key_string)
Added expanduser to the private key file

This allows one to use a path with `~` in it, which can be important for scripts that use double quotes to escape variables with spaces in them, e.g.:

    python -m raiden_contracts.deploy raiden --private-key "$PRIVATE_KEY"
py
diff --git a/confidence/io.py b/confidence/io.py
index <HASH>..<HASH> 100644
--- a/confidence/io.py
+++ b/confidence/io.py
@@ -79,7 +79,6 @@ def read_envvars(name: str, extension: typing.Optional[str] = None) -> Configura
               for var, value in environ.items()
               # TODO: document ignoring envvar_file
               if var.lower().startswith(prefix) and var.lower() != envvar_file}
-    # TODO: envvar values can only be str, how do we configure non-str values?
 
     if not values:
         return NotConfigured
@@ -92,7 +91,8 @@ def read_envvars(name: str, extension: typing.Optional[str] = None) -> Configura
     # include the number of variables matched for debugging purposes
     logging.info(f'reading configuration from {len(values)} {prefix}* environment variables')
 
-    return Configuration({dotted(name): value for name, value in values.items()})
+    # pass value to yaml.safe_load to align data type transformation with reading values from files
+    return Configuration({dotted(name): yaml.safe_load(value) for name, value in values.items()})
 
 
 def read_envvar_file(name: str, extension: typing.Optional[str] = None) -> Configuration:
Pass values of environment variables to yaml.safe_load

Should parse NAME_NS_KEY=5 as an int, rather than leaving it a str
py
diff --git a/ntcir_math_density/__main__.py b/ntcir_math_density/__main__.py
index <HASH>..<HASH> 100644
--- a/ntcir_math_density/__main__.py
+++ b/ntcir_math_density/__main__.py
@@ -189,7 +189,11 @@ def main():
 
     LOGGER.info("Pickling %s", args.positions.name)
     with gzip.open(args.positions.open("wb"), "wb") as f:
-        pickle.dump(positions_all, f)
+        pickle.dump({
+            dataset.path: {
+                identifier: position for position, (_, identifier)
+                in zip(positions_all[dataset.path], identifiers_all[dataset.path])
+            } for dataset in args.datasets}, f)
 
     LOGGER.info("Fitting density, and probability estimators")
     estimators = get_estimators(positions_all, positions_relevant)
Add support for pickling identifier positions (cont)
py
diff --git a/fudge/tests/test_fudge.py b/fudge/tests/test_fudge.py
index <HASH>..<HASH> 100644
--- a/fudge/tests/test_fudge.py
+++ b/fudge/tests/test_fudge.py
@@ -4,7 +4,7 @@ import unittest
 import fudge
 from nose.tools import eq_, raises
 from fudge import (
-    ExpectedCall, ExpectedCallOrder, Call, CallStack, FakeDeclarationError)
+    Fake, Registry, ExpectedCall, ExpectedCallOrder, Call, CallStack, FakeDeclarationError)
 
 
 class TestRegistry(unittest.TestCase):
@@ -659,6 +659,17 @@ class TestOrderedCalls(unittest.TestCase):
     @raises(FakeDeclarationError)
     def test_cannot_remember_order_when_expect_call_is_true(self):
         fake = fudge.Fake(expect_call=True).remember_order()
+
+    @raises(AssertionError)
+    def test_not_enough_calls(self):
+        r = Registry()
+        fake = Fake()
+        call_order = ExpectedCallOrder(fake)
+        r.remember_expected_call_order(call_order)
+
+        exp = ExpectedCall(fake, "callMe")
+        call_order.add_expected_call(exp)
+        r.verify()
\ No newline at end of file
Added test for not enough calls error (when ordered)
py
diff --git a/titlecase/__init__.py b/titlecase/__init__.py
index <HASH>..<HASH> 100755
--- a/titlecase/__init__.py
+++ b/titlecase/__init__.py
@@ -10,7 +10,7 @@ License: http://www.opensource.org/licenses/mit-license.php
 import re
 
 __all__ = ['titlecase']
-__version__ = '0.8.0'
+__version__ = '0.8.1'
 
 SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?'
 PUNCT = r"""!"#$%&'‘()*+,\-./:;?@[\\\]_`{|}~"""
Version bump to push patch to pypi
py
diff --git a/tests/test_mturk.py b/tests/test_mturk.py
index <HASH>..<HASH> 100644
--- a/tests/test_mturk.py
+++ b/tests/test_mturk.py
@@ -1,5 +1,5 @@
 import os
-from nose.tools import assert_raises
+from pytest import raises
 
 
 def creds_from_environment():
@@ -25,7 +25,7 @@ class TestMTurkService(object):
     def test_check_credentials_bad_credentials(self):
         from boto.mturk.connection import MTurkRequestError
         service = self.make_one(aws_access_key_id='bad', aws_secret_access_key='bad')
-        with assert_raises(MTurkRequestError):
+        with raises(MTurkRequestError):
             service.check_credentials()
 
     def test_check_credentials_no_creds_set_raises(self):
@@ -36,7 +36,7 @@ class TestMTurkService(object):
         }
         service = self.make_one(**empty_creds)
 
-        with assert_raises(MTurkServiceException):
+        with raises(MTurkServiceException):
             service.check_credentials()
 
     def test_register_hit_type(self):
Switch to pytest for new tests
py
diff --git a/gwpy/timeseries/core.py b/gwpy/timeseries/core.py
index <HASH>..<HASH> 100644
--- a/gwpy/timeseries/core.py
+++ b/gwpy/timeseries/core.py
@@ -1159,15 +1159,17 @@ class TimeSeriesBaseDict(OrderedDict):
 
         # -- find frametype(s)
         if frametype is None:
-            frametypes = dict()
-            for chan in channels:
-                ftype = datafind.find_best_frametype(
-                    chan, start, end, frametype_match=frametype_match,
-                    allow_tape=allow_tape)
+            matched = datafind.find_best_frametype(
+                channels, start, end, frametype_match=frametype_match,
+                allow_tape=allow_tape)
+            frametypes = {}
+            # flip dict to frametypes with a list of channels
+            for name, ftype in matched.items():
                 try:
-                    frametypes[ftype].append(chan)
+                    frametypes[ftype].append(name)
                 except KeyError:
-                    frametypes[ftype] = [chan]
+                    frametypes[ftype] = [name]
+
             if verbose and len(frametypes) > 1:
                 gprint("Determined %d frametypes to read" % len(frametypes))
         elif verbose:
TimeSeriesDict.find: use multi-channel find_frametype

most of the time a user wants to find many channels in a single type, so should optimise to search for all in one
py
diff --git a/expynent/patterns.py b/expynent/patterns.py
index <HASH>..<HASH> 100644
--- a/expynent/patterns.py
+++ b/expynent/patterns.py
@@ -299,6 +299,8 @@ LATITUDE = r'^(\+|-)?(?:90(?:(?:\.0{1,14})?)|(?:[0-9]|' \
 LONGITUDE = r'^(\+|-)?(?:180(?:(?:\.0{1,14})?)|(?:[0-9]|[1-9]' \
             r'[0-9]|1[0-7][0-9])(?:(?:\.[0-9]{1,14})?))$'
 
+# RegEx pattern to match French phone numbers (with and without country code)
+FRENCH_PHONE = r'^(?:\+33|0)\d{9}$'
 
 def file_extension(ext=''):
     """
Added RegEx pattern to match French phone numbers (with and without country code)
py
diff --git a/tests/test_util.py b/tests/test_util.py
index <HASH>..<HASH> 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -3,6 +3,7 @@ import doctest
 import datascience as ds
 from datascience import util
 import numpy as np
+import pytest
 
 def test_doctests():
     results = doctest.testmod(util, optionflags=doctest.NORMALIZE_WHITESPACE)
@@ -49,6 +50,10 @@ def test_table_apply():
     newtab = util.table_apply(tab, lambda a: a+1, subset=['b', 'c'])
     assert all(newtab['a'] == tab['a'])
     assert all(newtab['b'] == tab['b'] + 1)
+
+    with pytest.raises(ValueError) as err:
+        util.table_apply(tab, lambda a: a+1, subset=['b', 'd'])
+    assert "Colum mismatch: ['d']" in str(err.value)
 
 def _round_eq(a, b):
Added ``ValueError`` testing for ``util.table_apply`` as asked in issue #<I>
py
diff --git a/django_tooling/management/commands/resetdb.py b/django_tooling/management/commands/resetdb.py
index <HASH>..<HASH> 100644
--- a/django_tooling/management/commands/resetdb.py
+++ b/django_tooling/management/commands/resetdb.py
@@ -32,6 +32,7 @@ class Command(BaseCommand):
                 counter += 1
 
         cursor.execute('SET FOREIGN_KEY_CHECKS = 1; COMMIT')
+        cursor.close()
         connection.close()
 
         print('\nDropped {} tables'.format(counter))
FIXED: resetdb command
py
diff --git a/nolearn/cache.py b/nolearn/cache.py
index <HASH>..<HASH> 100644
--- a/nolearn/cache.py
+++ b/nolearn/cache.py
@@ -102,7 +102,7 @@ def cached(cache_key=default_cache_key, cache_path=None):
             # that's passed in via the decorator call
             # (`default_cache_key` by default).
             try:
-                key = bytes(cache_key(*args, **kwargs), encoding='ascii')
+                key = str(cache_key(*args, **kwargs)).encode('ascii')
             except DontCache:
                 return func(*args, **kwargs)
Fix breakage of Python 2 compatibility
py
diff --git a/centinel/primitives/tls.py b/centinel/primitives/tls.py
index <HASH>..<HASH> 100644
--- a/centinel/primitives/tls.py
+++ b/centinel/primitives/tls.py
@@ -32,6 +32,8 @@ def get_fingerprint(host, port=443, external=None, log_prefix=''):
     except ssl.SSLError:
         # exception could also happen here
         try:
+            # this uses the highest version SSL or TLS that both
+            # endpoints support
            cert = ssl.get_server_certificate((host, port),
                                              ssl_version=ssl.PROTOCOL_SSLv23)
        except Exception as exp:
add comment to TLS primitive to clarify the logic
py
diff --git a/python/setup.py b/python/setup.py
index <HASH>..<HASH> 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -46,7 +46,7 @@ with open('__nanoversion__.txt') as nv:
             nanoversion='%.4s' % (line.strip())
             break
     if len(nanoversion)>0 :
-        nanoversion='+'+nanoversion
+        nanoversion='.'+nanoversion
 
 if None in version_nums:
Use dot for nanoversion (pypi requirement).
py
diff --git a/pip_accel/tests.py b/pip_accel/tests.py
index <HASH>..<HASH> 100644
--- a/pip_accel/tests.py
+++ b/pip_accel/tests.py
@@ -36,12 +36,13 @@ import stat
 import subprocess
 import sys
 import tempfile
+import time
 import unittest
 
 # External dependencies.
 import coloredlogs
 from cached_property import cached_property
-from humanfriendly import coerce_boolean, compact, concatenate
+from humanfriendly import Timer, coerce_boolean, compact, concatenate
 from pip.commands.install import InstallCommand
 from pip.exceptions import DistributionNotFound
 
@@ -849,9 +850,19 @@ def wipe_directory(pathname):
 
     :param pathname: The directory's pathname (a string).
     """
-    if os.path.isdir(pathname):
-        shutil.rmtree(pathname)
-    os.makedirs(pathname)
+    timer = Timer()
+    while True:
+        try:
+            if os.path.isdir(pathname):
+                shutil.rmtree(pathname)
+            os.makedirs(pathname)
+            return
+        except Exception:
+            if timer.elapsed_time < 60:
+                logger.warning("Got error wiping directory (%s), retrying ..", pathname)
+                time.sleep(1)
+            else:
+                raise
 
 
 def uninstall_through_subprocess(package_name):
Still trying to avoid removing "in use" files on Windows

<URL>
py
diff --git a/querybuilder/query.py b/querybuilder/query.py
index <HASH>..<HASH> 100644
--- a/querybuilder/query.py
+++ b/querybuilder/query.py
@@ -452,7 +452,14 @@ class Query(object):
 
         for table_dict in self.joins:
-            join_parts.append('{0} {1} ON {2} '.format(table_dict['join_type'], self.get_table_identifier(table_dict), table_dict['condition']))
+            condition = table_dict['condition']
+            # TODO: handle more than one table in the condition
+            condition_parts = condition.split('.')
+            if len(condition_parts) > 1:
+                condition_parts[0] = self.table_alias_map.get(condition_parts[0], condition_parts[0])
+            condition = '.'.join(condition_parts)
+
+            join_parts.append('{0} {1} ON {2} '.format(table_dict['join_type'], self.get_table_identifier(table_dict), condition))
 
             # if table_dict['type'] is Query:
             #     join_parts.append('{0} ({1}) AS {2} ON {3} '.format(table_dict['join_type'], table_dict['query'].get_query(), table_alias, table_dict['condition']))
             # else:
* Handle table alias in join condition
py
diff --git a/logzio/handler.py b/logzio/handler.py
index <HASH>..<HASH> 100644
--- a/logzio/handler.py
+++ b/logzio/handler.py
@@ -88,13 +88,12 @@ class LogzioHandler(logging.Handler):
             return_json['exception'] = self.format_exception(message.exc_info)
         else:
             formatted_message = self.format(message)
-            return_json.update(self.extra_fields(message))
-
             if isinstance(formatted_message, dict):
                 return_json.update(formatted_message)
             else:
                 return_json['message'] = formatted_message
 
+        return_json.update(self.extra_fields(message))
         return return_json
 
     def emit(self, record):
Fixed ignored extra fields when logging exceptions
py
diff --git a/tests/test_import.py b/tests/test_import.py
index <HASH>..<HASH> 100644
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -1,6 +1,5 @@
-#from sc2maptool import selectMap
-#from sc2maptool.mapRecord import MapRecord
+import sc2gameLobby
 
-#def test_simple():
-#    for m in selectMap(name="flat", melee=True, excludeName=True, closestMatch=False):
-#        assert isinstance(m, MapRecord)
+def test_simple():
+    # Some code here that tests things
+    assert True
Added bare minimum for a test
py
diff --git a/chess/variant.py b/chess/variant.py
index <HASH>..<HASH> 100644
--- a/chess/variant.py
+++ b/chess/variant.py
@@ -465,7 +465,7 @@ class ThreeCheckBoard(chess.Board):
     def is_insufficient_material(self):
         return self.occupied == self.kings
 
-    # TODO: set FEN/EPD, zobrist hashing
+    # TODO: zobrist hashing
 
     def set_epd(self, epd):
         # Split into 5 or 6 parts.
Remaining todo is zobrist hashing
py
diff --git a/__main__.py b/__main__.py
index <HASH>..<HASH> 100644
--- a/__main__.py
+++ b/__main__.py
@@ -18,8 +18,8 @@ if __name__=='__main__':
     """
     defs = getKnownLadders()
     ALLOWED_LADDERS = list(defs)
-    usage_def = "Available valid ladder values for NAME:%s %s"%(os.linesep, ("%s "%os.linesep).join(ALLOWED_LADDERS))
-    parser = ArgumentParser(description=usage_def, epilog="version: %s"%__version__)
+    description = "Available valid ladder values for NAME:%s %s"%(os.linesep, ("%s "%os.linesep).join(ALLOWED_LADDERS))
+    parser = ArgumentParser(description=description, epilog="version: %s"%__version__)
     optionsLadderOps = parser.add_argument_group('ladder operations')
     optionsLadderOps.add_argument("--add" , action="store_true" , help="Add a new ladder definition from provided criterira values. ('name' is required; see criteria definition below).")
     optionsLadderOps.add_argument("--get" , type=str , help="the name of the ladder to use.", metavar="NAME")
- renamed variable usage_def => description; better named for its intended usage
py
diff --git a/systemd/test/test_daemon.py b/systemd/test/test_daemon.py
index <HASH>..<HASH> 100644
--- a/systemd/test/test_daemon.py
+++ b/systemd/test/test_daemon.py
@@ -353,7 +353,7 @@ def test_daemon_notify_memleak():
 
     try:
         notify('', True, 0, fds)
-    except ConnectionRefusedError:
+    except connection_error:
        pass
 
    assert sys.getrefcount(fd) <= ref_cnt, 'leak'
tests: python2-compat in another place
py
diff --git a/quick_cache.py b/quick_cache.py
index <HASH>..<HASH> 100644
--- a/quick_cache.py
+++ b/quick_cache.py
@@ -157,7 +157,7 @@ class _CacheLock(object):
             if oldest_fp is None:
                 if self._warnings is not None:
                     self._warnings("cannot free enough space for quota ({0}MB > {1}MB)!", get_size(self._base) + own_size, self._quota)
-                return  # cannot free enough space
+                return obj  # cannot free enough space
             if self._warnings is not None:
                 self._warnings("removing old cache file: '{0}'", oldest_fp)
             os.remove(oldest_fp)
fixed potential cache for too small caches
py
diff --git a/sorl/thumbnail/kvstores/cached_db_kvstore.py b/sorl/thumbnail/kvstores/cached_db_kvstore.py
index <HASH>..<HASH> 100644
--- a/sorl/thumbnail/kvstores/cached_db_kvstore.py
+++ b/sorl/thumbnail/kvstores/cached_db_kvstore.py
@@ -5,6 +5,7 @@ from sorl.thumbnail.conf import settings
 from sorl.thumbnail.models import KVStore as KVStoreModel
 from sorl.thumbnail.compat import get_cache
 
+
 class EMPTY_VALUE(object):
     pass
Add blank line to appease flake8
py
diff --git a/reana_commons/version.py b/reana_commons/version.py index <HASH>..<HASH> 100755 --- a/reana_commons/version.py +++ b/reana_commons/version.py @@ -14,4 +14,4 @@ and parsed by ``setup.py``. from __future__ import absolute_import, print_function -__version__ = "0.5.0.dev20181116" +__version__ = "0.5.0.dev20181126"
release: <I>.de<I>
py
diff --git a/minio/helpers.py b/minio/helpers.py index <HASH>..<HASH> 100644 --- a/minio/helpers.py +++ b/minio/helpers.py @@ -437,6 +437,9 @@ def is_valid_bucket_notification_config(notifications): ]) NOTIFICATION_EVENTS = set([ + 's3:ObjectAccessed:*', + 's3:ObjectAccessed:Get', + 's3:ObjectAccessed:Head', 's3:ReducedRedundancyLostObject', 's3:ObjectCreated:*', 's3:ObjectCreated:Put',
ObjectAccessed with Get and Head methods (#<I>)
py
diff --git a/zipline/algorithm.py b/zipline/algorithm.py index <HASH>..<HASH> 100644 --- a/zipline/algorithm.py +++ b/zipline/algorithm.py @@ -706,8 +706,7 @@ class TradingAlgorithm(object): @api_method def order_target_value(self, sid, target, - limit_price=None, stop_price=None, - style=None, include_open_orders=False): + limit_price=None, stop_price=None, style=None): """ Place an order to adjust a position to a target value. If the position doesn't already exist, this is equivalent to placing a new
MAINT: Removed unused keyword arg. Deleted keyword arg include_open_orders; it was left over from working on handling open orders.
py
diff --git a/invenio_client/__init__.py b/invenio_client/__init__.py index <HASH>..<HASH> 100644 --- a/invenio_client/__init__.py +++ b/invenio_client/__init__.py @@ -24,10 +24,12 @@ """Python API for remote Invenio instances.""" from .connector import ( - InvenioConnector, InvenioConnectorServerError, InvenioConnectorAuthError + InvenioConnector, InvenioConnectorAuthError, InvenioConnectorServerError ) +from .version import __version__ -__all__ = ('InvenioConnector', +__all__ = ('__version__', + 'InvenioConnector', 'InvenioConnectorServerError', 'InvenioConnectorAuthError', )
invenio_client: export of __version__ * Exports `__version__` in the top level `invenio_client` module. * Amends the order of import statements in `__init__.py` in order to conform more fully to PEP 8 recommendations.
py
diff --git a/anyconfig/cli.py b/anyconfig/cli.py index <HASH>..<HASH> 100644 --- a/anyconfig/cli.py +++ b/anyconfig/cli.py @@ -114,8 +114,6 @@ def main(argv=sys.argv): (options, args) = parser.parse_args(argv[1:]) A.set_loglevel(to_log_level(options.loglevel)) - logging.basicConfig(format="%(asctime)s %(name)s: [%(levelname)s] " - "%(message)s") if not args: if options.list:
remove the lines configuring the logging format, as it's already done in anyconfig.globals and is unnecessary here
py
diff --git a/cmd2/argparse_completer.py b/cmd2/argparse_completer.py index <HASH>..<HASH> 100755 --- a/cmd2/argparse_completer.py +++ b/cmd2/argparse_completer.py @@ -440,9 +440,7 @@ class AutoCompleter(object): return completion_results def _format_completions(self, action, completions: List[Union[str, CompletionItem]]): - if completions and len(completions) > 1 and \ - isinstance(completions[0], CompletionItem): - + if completions and len(completions) > 1 and isinstance(completions[0], CompletionItem): token_width = len(action.dest) completions_with_desc = [] @@ -458,9 +456,9 @@ class AutoCompleter(object): fill_width=fill_width) completions_with_desc.append(entry) - header = '\n{: <{token_width}}{}'.format(action.dest, action.desc_header, token_width=token_width+2) - print(header) + header = '\n{: <{token_width}}{}'.format(action.dest.upper(), action.desc_header, token_width=token_width+2) + self._cmd2_app.completion_header = header self._cmd2_app.display_matches = completions_with_desc return completions
Update to use new completion_header when returning completions in tabular format with descriptions
py
diff --git a/services/managers/openfire_manager.py b/services/managers/openfire_manager.py index <HASH>..<HASH> 100755 --- a/services/managers/openfire_manager.py +++ b/services/managers/openfire_manager.py @@ -77,11 +77,15 @@ class OpenfireManager: @staticmethod def update_user_groups(username, password, groups): - api = UserService(settings.OPENFIRE_ADDRESS, settings.OPENFIRE_SECRET_KEY) - api.update_user(username, password, "", "", groups) + try: + api = UserService(settings.OPENFIRE_ADDRESS, settings.OPENFIRE_SECRET_KEY) + api.update_user(username, password, "", "", groups) + except exception.HTTPException as e: + print e @staticmethod def delete_user_groups(username, groups): + api = UserService(settings.OPENFIRE_ADDRESS, settings.OPENFIRE_SECRET_KEY) api.delete_group(username, groups)
Added details to syncgroup cache
py
diff --git a/salt/states/mount.py b/salt/states/mount.py index <HASH>..<HASH> 100644 --- a/salt/states/mount.py +++ b/salt/states/mount.py @@ -129,11 +129,16 @@ def mounted(name, if opts: for opt in opts: if opt not in active[real_name]['opts']: - ret['changes']['umount'] = "Forced remount because " \ - + "options changed" - remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts) - ret['result'] = remount_result - return ret + if __opts__['test']: + ret['result'] = None + ret['comment'] = "Remount would be forced because options changed" + return ret + else: + ret['changes']['umount'] = "Forced remount because " \ + + "options changed" + remount_result = __salt__['mount.remount'](real_name, device, mkmnt=mkmnt, fstype=fstype, opts=opts) + ret['result'] = remount_result + return ret if real_device not in device_list: # name matches but device doesn't - need to umount ret['changes']['umount'] = "Forced unmount because devices " \
Respect test=True in mount state. Closes #<I>
py
diff --git a/tests/test_sources.py b/tests/test_sources.py index <HASH>..<HASH> 100644 --- a/tests/test_sources.py +++ b/tests/test_sources.py @@ -43,12 +43,12 @@ class TestDataFrameSource(TestCase): assert isinstance(source.start, pd.lib.Timestamp) assert isinstance(source.end, pd.lib.Timestamp) for event in source: - assert 'sid' in event - assert 'arbitrary' in event - assert 'volume' in event - assert 'price' in event - assert event['arbitrary'] == 1. - assert event['volume'] == 1000 - assert event['sid'] == 0 - assert isinstance(event['volume'], int) - assert isinstance(event['arbitrary'], float) + self.assertTrue('sid' in event) + self.assertTrue('arbitrary' in event) + self.assertTrue('volume' in event) + self.assertTrue('price' in event) + self.assertEquals(event['arbitrary'], 1.) + self.assertEquals(event['volume'], 1000) + self.assertEquals(event['sid'], 0) + self.assertTrue(isinstance(event['volume'], int)) + self.assertTrue(isinstance(event['arbitrary'], float))
Changes test sources to use unittest's assert methods instead of plain assert.
py
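One practical reason for the change above (a generic illustration, not from the zipline tests): unittest's assert methods report the mismatched values on failure, while a plain assert raises only a bare AssertionError. Plain asserts are also stripped entirely when Python runs with -O, silently disabling the checks.

import unittest

class Demo(unittest.TestCase):
    def test_plain(self):
        assert 1000 == 999  # fails with just "AssertionError"

    def test_method(self):
        # fails with "AssertionError: 1000 != 999", far easier to debug
        self.assertEqual(1000, 999)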
diff --git a/spockbot/mcp/datautils.py b/spockbot/mcp/datautils.py index <HASH>..<HASH> 100644 --- a/spockbot/mcp/datautils.py +++ b/spockbot/mcp/datautils.py @@ -151,9 +151,7 @@ def pack_slot(slot): return o # Metadata is a dictionary list thing that -# holds metadata about entities. Currently -# implemented as a list/tuple thing, might -# switch to dicts +# holds metadata about entities. metadata_lookup = MC_BYTE, MC_SHORT, MC_INT, MC_FLOAT, MC_STRING, MC_SLOT
We switched to dicts but never updated the comment
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,8 @@ long_description = '\n\n'.join([read_content('README.rst'), install_requires = [ 'enum34 ; python_version < "3"', 'pyvisa>=1.6', - 'PyYAML', + 'PyYAML<5.3 ; python_version < "3.5"', + 'PyYAML ; python_version >= "3.5"', 'stringparser', ]
Account for dropped Python <I> support in PyYAML <I>+
py
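The `; python_version < "3.5"` syntax in the diff above is a PEP 508 environment marker, which pip evaluates against the installing interpreter. A small sketch of how such markers can be evaluated programmatically, assuming the third-party packaging library is available:

from packaging.markers import Marker

marker = Marker('python_version < "3.5"')
# True only on interpreters older than 3.5, so the pinned PyYAML<5.3
# requirement applies there and the unpinned one everywhere else
print(marker.evaluate())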
diff --git a/tests/helpers/test_dot_notation.py b/tests/helpers/test_dot_notation.py index <HASH>..<HASH> 100644 --- a/tests/helpers/test_dot_notation.py +++ b/tests/helpers/test_dot_notation.py @@ -12,5 +12,4 @@ def test_dot(): assert dot('hey.dot.another', compile_to="{1}/{.}") == "hey/dot/another" assert dot('hey.dot.another', compile_to="/{1}/{.}") == "/hey/dot/another" with pytest.raises(ValueError): - assert dot('hey.dot.another', - compile_to="{1}//{.}") == "hey/dot/another" + assert dot('hey.dot.another', compile_to="{1}//{.}") == "hey/dot/another"
fix E<I> continuation line over-indented for visual indent
py
diff --git a/safe/common/test/test_resource_parameter_widget.py b/safe/common/test/test_resource_parameter_widget.py index <HASH>..<HASH> 100644 --- a/safe/common/test/test_resource_parameter_widget.py +++ b/safe/common/test/test_resource_parameter_widget.py @@ -22,11 +22,9 @@ from safe_extras.parameters.unit import Unit from safe_extras.parameters.metadata import unit_feet_depth, unit_metres_depth from safe.common.resource_parameter import ResourceParameter from safe.common.resource_parameter_widget import ResourceParameterWidget +from safe.test.utilities import get_qgis_app -# noinspection PyPackageRequirements -from PyQt4.QtGui import QApplication - -application = QApplication([]) +QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app() class TestResourceParameterWidget(unittest.TestCase):
Fix test that makes `make test_suite` get stuck.
py
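The likely mechanism behind the hang fixed above (a hypothetical sketch; the real get_qgis_app in safe.test.utilities does more, e.g. building a canvas and iface): Qt allows only one QApplication per process, so test modules must share a lazily created singleton instead of each constructing their own.

_QGIS_APP = None

def get_app():
    """Return the process-wide QApplication, creating it only once."""
    global _QGIS_APP
    if _QGIS_APP is None:
        from PyQt4.QtGui import QApplication  # PyQt4, matching the diff above
        _QGIS_APP = QApplication([])
    return _QGIS_APP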
diff --git a/pysat/tests/test_utils.py b/pysat/tests/test_utils.py index <HASH>..<HASH> 100644 --- a/pysat/tests/test_utils.py +++ b/pysat/tests/test_utils.py @@ -109,14 +109,19 @@ class TestCIonly(): re_load(pysat) + captured = capsys.readouterr() + assert captured.out.find("Hi there!") >= 0 + + # Make sure user files are blank + with open(os.path.join(root, 'data_path.txt'), 'r') as f: + assert len(f.readlines()) == 0 + with open(os.path.join(root, 'user_modules.txt'), 'r') as f: + assert len(f.readlines()) == 0 + # Move settings back shutil.rmtree(root) shutil.move(new_root, root) - captured = capsys.readouterr() - - assert captured.out.find("Hi there!") >= 0 - class TestScaleUnits(): def setup(self):
TST: check that files are initialized
py
diff --git a/billy/commands/validate_api.py b/billy/commands/validate_api.py index <HASH>..<HASH> 100644 --- a/billy/commands/validate_api.py +++ b/billy/commands/validate_api.py @@ -19,15 +19,14 @@ class ValidateApi(BaseCommand): help = 'validate data from the API' def add_args(self): - self.add_argument('abbrs', nargs='+', help='states to validate') self.add_argument('--sunlight_key', dest='SUNLIGHT_SERVICES_KEY', help='the Sunlight API key to use') self.add_argument('--schema_dir', default=None, help='directory to use for API schemas (optional)') def handle(self, args): - for abbr in args.abbrs: - validate_api(abbr, args.schema_dir) + for metadata in db.metadata.find(): + validate_api(metadata['abbreviation'], args.schema_dir) def get_xml_schema(): cwd = os.path.split(__file__)[0]
validate_api now works for all states at once
py
diff --git a/perlin.py b/perlin.py index <HASH>..<HASH> 100644 --- a/perlin.py +++ b/perlin.py @@ -103,7 +103,7 @@ class BaseNoise: """ if period is not None: self.period = period - perm = range(self.period) + perm = list(range(self.period)) perm_right = self.period - 1 for i in list(perm): j = randint(0, perm_right)
Update perlin.py. Got "TypeError: 'range' object does not support item assignment" in Python 3. range() now returns an immutable range object rather than a list, so wrap it in list() before assigning to elements. This fixes it to work on Python 2.x and 3.x.
py
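A minimal demonstration of the Python 3 behavior described in the message (illustrative, independent of perlin.py):

perm = range(5)
try:
    perm[0], perm[4] = perm[4], perm[0]  # swap, as a shuffle would do
except TypeError:
    # Python 3: range objects are immutable, so item assignment fails;
    # on Python 2 range() already returned a list and the swap succeeded
    perm = list(range(5))
    perm[0], perm[4] = perm[4], perm[0]  # works on a real list
print(perm)  # [4, 1, 2, 3, 0]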
diff --git a/pelix/shell/report.py b/pelix/shell/report.py index <HASH>..<HASH> 100644 --- a/pelix/shell/report.py +++ b/pelix/shell/report.py @@ -550,7 +550,7 @@ class ReportCommands(object): """ return str(obj) - def __dump_json(self, data): + def to_json(self, data): """ Converts the given object to a pretty-formatted JSON string @@ -570,7 +570,7 @@ class ReportCommands(object): self.make_report(session, *levels) if self.__report: - session.write_line(self.__dump_json(self.__report)) + session.write_line(self.to_json(self.__report)) else: session.write_line("No report to show") @@ -584,7 +584,7 @@ class ReportCommands(object): try: with open(filename, "w+") as fp: - fp.write(self.__dump_json(self.__report)) + fp.write(self.to_json(self.__report)) except IOError as ex: session.write_line("Error writing to file: {0}", ex)
report: Renamed the "__dump_json" method to "to_json". The method doesn't have to be private.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -62,7 +62,7 @@ extras_require = { 'sar_c': ['python-geotiepoints >= 1.1.7', 'gdal'], 'abi_l1b': ['h5netcdf'], # Writers: - 'cf': ['h5netcdf >= 0.7.2'], + 'cf': ['h5netcdf >= 0.7.3'], 'scmi': ['netCDF4 >= 1.1.8'], 'geotiff': ['gdal', 'trollimage[geotiff]'], 'mitiff': ['libtiff'],
Update h5netcdf requirement
py