Dataset columns:
  diff            string (139 to 3.65k characters)
  message         string (8 to 627 characters)
  diff_languages  string (1 class: py)
diff --git a/pygmsh/built_in/geometry.py b/pygmsh/built_in/geometry.py
index <HASH>..<HASH> 100644
--- a/pygmsh/built_in/geometry.py
+++ b/pygmsh/built_in/geometry.py
@@ -1026,6 +1026,21 @@ class Geometry:
             )
         return

+    def rotate(self, input_entity, point, angle, axis):
+        """Translates input_entity itself by vector.
+
+        Changes the input object.
+        """
+        d = {1: "Line", 2: "Surface", 3: "Volume"}
+        self._GMSH_CODE.append(
+            "Rotate {{ {{{}}}, {{{}}}, {} }} {{{}{{{}}}; }}".format(
+                ", ".join([str(ax) for ax in axis]),
+                ", ".join([str(p) for p in point]),
+                angle, d[input_entity.dimension],
+                input_entity.id,))
+
+        return
+
     def symmetry(self, input_entity, coefficients, duplicate=True):
         """Transforms all elementary entities symmetrically to a plane. The vector should contain four expressions giving the coefficients of the plane's equation.
Added rotation to built-in geom
py
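A minimal usage sketch for the rotate method added above. The built-in Geometry constructor, the polygon coordinates, and the lcar value are assumptions for illustration; the axis/point/angle arguments follow gmsh's Rotate convention (axis direction, a point on the axis, angle in radians):

import math

import pygmsh

geom = pygmsh.built_in.Geometry()
# triangle in the z=0 plane; coordinates and mesh size are illustrative
poly = geom.add_polygon(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]],
    lcar=0.1,
)
# rotate the surface 45 degrees about the z-axis through the origin;
# as the docstring notes, this mutates the input object in place
geom.rotate(poly.surface, point=[0.0, 0.0, 0.0], angle=math.pi / 4,
            axis=[0.0, 0.0, 1.0])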
diff --git a/tornado/iostream.py b/tornado/iostream.py
index <HASH>..<HASH> 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -1402,7 +1402,13 @@ class SSLIOStream(IOStream):
                 # to cause do_handshake to raise EBADF and ENOTCONN, so make
                 # those errors quiet as well.
                 # https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
-                if self._is_connreset(err) or err.args[0] in (errno.EBADF, errno.ENOTCONN):
+                # Errno 0 is also possible in some cases (nc -z).
+                # https://github.com/tornadoweb/tornado/issues/2504
+                if self._is_connreset(err) or err.args[0] in (
+                    0,
+                    errno.EBADF,
+                    errno.ENOTCONN,
+                ):
                     return self.close(exc_info=err)
                 raise
             except AttributeError as err:
iostream: Add errno 0 to the list of silent errors in TLS handshakes

This error is possible for some connections that don't follow through
with the TLS handshake.

Fixes #<I>
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,8 @@
+import sys
 from distutils.core import setup
+
 for cmd in ('egg_info', 'develop'):
-    import sys
     if cmd in sys.argv:
         from setuptools import setup
Don't import sys twice
py
diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py
index <HASH>..<HASH> 100644
--- a/tools/interop_matrix/client_matrix.py
+++ b/tools/interop_matrix/client_matrix.py
@@ -93,6 +93,9 @@ LANG_RELEASE_MATRIX = {
         {
             'v1.12.0': None
         },
+        {
+            'v1.13.0': None
+        },
     ],
     'go': [
         {
@@ -213,6 +216,9 @@ LANG_RELEASE_MATRIX = {
         {
             'v1.12.0': None
         },
+        {
+            'v1.13.0': None
+        },
     ],
     'node': [
         {
@@ -289,6 +295,9 @@ LANG_RELEASE_MATRIX = {
         {
             'v1.12.0': None
         },
+        {
+            'v1.13.0': None
+        },
     ],
     'php': [
         {
@@ -327,6 +336,9 @@ LANG_RELEASE_MATRIX = {
         {
             'v1.12.0': None
         },
+        {
+            'v1.13.0': None
+        },
     ],
     'csharp': [
         {
@@ -370,6 +382,9 @@ LANG_RELEASE_MATRIX = {
         {
             'v1.12.0': None
         },
+        {
+            'v1.13.0': None
+        },
     ],
 }
Add version <I> to interop matrix
py
diff --git a/baron/grammator_operators.py b/baron/grammator_operators.py
index <HASH>..<HASH> 100644
--- a/baron/grammator_operators.py
+++ b/baron/grammator_operators.py
@@ -77,7 +77,12 @@ def include_operators(pg):

     @pg.production("not_test : NOT not_test")
     def not_node((not_, comparison)):
-        return unitary_operator('not', target=comparison, space=not_.after_space)
+        return {
+            "type": "unitary_operator",
+            "value": "not",
+            "target": comparison,
+            "space": not_.after_space
+        }

     @pg.production("comparison : expr LESS comparison")
[mod] continue refactoring
py
diff --git a/bin/permutation_test.py b/bin/permutation_test.py
index <HASH>..<HASH> 100755
--- a/bin/permutation_test.py
+++ b/bin/permutation_test.py
@@ -582,9 +582,9 @@ def parse_arguments():
                         type=int, default=2,
                         help=help_str)
     help_str = ('Perform tsg permutation test if gene has '
-                'atleast a user specified number of deleterious mutations (default: 1)')
+                'at least a user specified number of deleterious mutations (default: 5)')
     parser.add_argument('-d', '--deleterious',
-                        type=int, default=1,
+                        type=int, default=5,
                         help=help_str)
     help_str = ('Maximum TSG score to allow gene to be tested for oncogene '
                 'permutation test. (Default: .10)')
Increased the minimum number of deleterious mutations for a gene to be tested in the tsg permutation test
py
diff --git a/util/batch/universal/system.py b/util/batch/universal/system.py
index <HASH>..<HASH> 100644
--- a/util/batch/universal/system.py
+++ b/util/batch/universal/system.py
@@ -21,7 +21,8 @@ elif IS_NEC:
     util.logging.debug('Choosing batch system {}.'.format(BATCH_SYSTEM_STR))
     from util.batch.nec.system import *
 elif IS_NONE:
-    util.logging.warn('Environmental variable {} is not set. Chosing general batch system.'.format(BATCH_SYSTEM_ENV_NAME))
+    util.logging.warn('Environmental variable {} is not set. Choosing general batch system.'.format(BATCH_SYSTEM_ENV_NAME))
     from util.batch.general.system import *
 else:
-    raise ValueError('Batch system {} is unknown.'.format(BATCH_SYSTEM_STR))
+    util.logging.warn('Batch system {} is unknown. Choosing general batch system.'.format(BATCH_SYSTEM_STR))
+    from util.batch.general.system import *
MAINT: util.batch.universal.system: choose general batch system if the wanted batch system is unknown.
py
diff --git a/pysat/_instrument.py b/pysat/_instrument.py
index <HASH>..<HASH> 100644
--- a/pysat/_instrument.py
+++ b/pysat/_instrument.py
@@ -208,7 +208,7 @@ class Instrument(object):
                     setattr(self, iattr, getattr(inst_module, iattr).lower())
                 else:
                     raise AttributeError(''.join(['Supplied module ',
-                                                  inst_module.__repr__(),
+                                                  "{:}".format(inst_module),
                                                   'is missing required ',
                                                   'attribute: ', iattr]))
@@ -843,7 +843,7 @@ class Instrument(object):
         # update normal metadata parameters in a single go
         # case must always be preserved in Meta object
         new_fdict = {}
-        for kfey in fdict:
+        for fkey in fdict:
             case_old = self.meta.var_case_name(fkey)
             new_fdict[case_old] = fdict[fkey]
         self.meta.data.rename(index=new_fdict, inplace=True)
BUG: module repr and iterator name

Fixed bugs in printing the instrument module representation and a typo
in an iterator variable name.
py
diff --git a/formly/views/results.py b/formly/views/results.py
index <HASH>..<HASH> 100644
--- a/formly/views/results.py
+++ b/formly/views/results.py
@@ -44,9 +44,12 @@ class RemapView(LoginRequiredMixin, DetailView):
         answer_string = self.kwargs.get('answer_string')
         question = self.get_object()
         mapping = dict([(unquote(remapped_answer), answer_string) for remapped_answer in remapped_answers])
-        for mapped_answer in question.mapping:
-            if mapped_answer in answer_string:
-                question.mapping.pop(mapped_answer, None)
+
+        for mapped_answer in question.mapping.keys():
+            answer = question.mapping[mapped_answer]
+            if answer in answer_string:
+                del question.mapping[mapped_answer]
+
         question.mapping.update(mapping)
         question.save()
         for result in question.results.all():
Fix for bug when removing stale answers
py
diff --git a/dynamic_rest/filters.py b/dynamic_rest/filters.py
index <HASH>..<HASH> 100644
--- a/dynamic_rest/filters.py
+++ b/dynamic_rest/filters.py
@@ -194,6 +194,13 @@ class DynamicFilterBackend(BaseFilterBackend):
         self.DEBUG = settings.DEBUG

         return self._build_queryset(queryset=queryset)

+    """
+    This function was renamed and broke downstream dependencies that haven't
+    been updated to use the new naming convention.
+    """
+    def _extract_filters(self, **kwargs):
+        return self._get_requested_filters(**kwargs)
+
     def _get_requested_filters(self, **kwargs):
         """
         Convert 'filters' query params into a dict that can be passed
Proxy renamed field for downstream dependencies
py
diff --git a/pymatbridge/version.py b/pymatbridge/version.py
index <HASH>..<HASH> 100644
--- a/pymatbridge/version.py
+++ b/pymatbridge/version.py
@@ -4,7 +4,7 @@
 _version_major = 0
 _version_minor = 1
 _version_micro = ''  # use '' for first of series, number for 1 and above
-_version_extra = 'dev'
+_version_extra = ''
 #_version_extra = ''  # Uncomment this for full releases

 # Construct full version string from these.
Increment version to tag + release.
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@ long_description = ('Ariane CLIP3 (client python 3) is the python implementation '
                     + IRC on freenode #ariane.echinopsii')

 setup(name='ariane_clip3',
-      version='0.1.3-b01',
+      version='0.1.3-b02',
       description='Ariane Python API Library',
       long_description=long_description,
       author='Mathilde Ffrench',
@@ -22,7 +22,7 @@ setup(name='ariane_clip3',
       maintainer='Mathilde Ffrench',
       maintainer_email='[email protected]',
       url='https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3.git',
-      download_url='https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3.git/tarball/0.1.3-b01',
+      download_url='https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3.git/tarball/0.1.3-b02',
       packages=['ariane_clip3', 'ariane_clip3.rabbitmq', 'ariane_clip3.rest'],
       license='AGPLv3',
       install_requires=['requests', 'epika-python3', 'pykka'],
[ACC-<I>] new beta version
py
diff --git a/cmdhelper.py b/cmdhelper.py
index <HASH>..<HASH> 100755
--- a/cmdhelper.py
+++ b/cmdhelper.py
@@ -137,11 +137,17 @@ class MyStreamHandler(logging.StreamHandler):
         newline will be written to the output stream."""
         try:
             msg = self.format(record)
-            terminator = getattr(record, 'terminator', '\n')
-            if hasattr(self.stream, "encoding") and self.stream.encoding:
-                self.stream.write(msg)
+            if isinstance(msg,unicode):
+                if hasattr(self.stream, "encoding") and self.stream.encoding:
+                    # Stream should take care of encoding, but do it explicitly to
+                    # prevent bug in Python 2.6 - see
+                    # https://stackoverflow.com/questions/8016236/python-unicode-handling-differences-between-print-and-sys-stdout-write
+                    self.stream.write(msg.encode(self.stream.encoding))
+                else:
+                    self.stream.write(msg.encode(encoding))
             else:
-                self.stream.write(msg.encode(encoding))
+                self.stream.write(msg)
+            terminator = getattr(record, 'terminator', '\n')
             if terminator is not None:
                 self.stream.write(terminator)
             self.flush()
Improve UnicodeEncodeError fix so it also works on Python <I>
py
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
index <HASH>..<HASH> 100644
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -140,6 +140,7 @@ def py_interface(_dir, _make_venv):
             'python', '-c',
             'import ctypes, datetime, io, os, ssl, weakref',
             retcode=None,
+            encoding=None,
         )
     return retcode == 0
Don't attempt to decode the healthy response
py
diff --git a/src/yamlinclude/constructor.py b/src/yamlinclude/constructor.py
index <HASH>..<HASH> 100644
--- a/src/yamlinclude/constructor.py
+++ b/src/yamlinclude/constructor.py
@@ -7,9 +7,8 @@ Include YAML files within YAML
 import os.path
 import re
 from glob import iglob
-from re import Pattern
 from sys import version_info
-from typing import Sequence, Tuple
+from typing import Optional, Pattern, Sequence, Tuple

 import yaml

@@ -44,7 +43,7 @@ class YamlIncludeConstructor:
             self,
             base_dir: str = '',
             encoding: str = '',
-            reader_map: Sequence[Tuple[re.Pattern, Reader]] = None  # noqa
+            reader_map: Optional[Sequence[Tuple[Pattern, Reader]]] = None
     ):
         """
         :param str base_dir: Base directory where search including YAML files
Fix a `re.Pattern` typing problem
py
diff --git a/insights/client/__init__.py b/insights/client/__init__.py
index <HASH>..<HASH> 100644
--- a/insights/client/__init__.py
+++ b/insights/client/__init__.py
@@ -252,11 +252,10 @@ class InsightsClient(object):

     def update(self):
         egg_path = self.fetch()
-        if (egg_path and
-                ('core' in egg_path) and
-                egg_path['core'] is not None and
-                self.verify(egg_path['core'])):
-            self.install(egg_path['core'], egg_path['gpg_sig'])
+        if (egg_path.get('core') is not None and self.verify(egg_path['core'])['gpg']):
+            return self.install(egg_path['core'], egg_path['gpg_sig'])
+        else:
+            return False

     def verify(self, egg_path, gpg_key=constants.pub_gpg_path):
         """
Need to ensure a boolean is returned from InsightsClient.update()

This is so that the calling code can know if handle_startup() returned
early.
py
diff --git a/paramz/optimization/optimization.py b/paramz/optimization/optimization.py
index <HASH>..<HASH> 100644
--- a/paramz/optimization/optimization.py
+++ b/paramz/optimization/optimization.py
@@ -324,7 +324,7 @@ def get_optimizer(f_min):
     #if rasm_available:
     #    optimizers['rasmussen'] = opt_rasm

-    for opt_name in optimizers.keys():
+    for opt_name in sorted(optimizers.keys()):
         if opt_name.lower().find(f_min.lower()) != -1:
             return optimizers[opt_name]
FIX: Prevent optimizer selection from depending on iteration order of dictionary keys.
py
diff --git a/consoleprinter/__init__.py b/consoleprinter/__init__.py
index <HASH>..<HASH> 100644
--- a/consoleprinter/__init__.py
+++ b/consoleprinter/__init__.py
@@ -2928,7 +2928,7 @@ def remove_escapecodes(escapedstring):

 remove_color = remove_escapecodes
 remove_colors = remove_escapecodes
-
+strip_colors = remove_escapecodes

 def remove_extra_indentation(doc, stop_looking_when_encountered=None, padding=0, frontspacer=" "):
     """
Monday <I> November <I> (week:<I> day:<I>), <I>:<I>:<I>
py
diff --git a/libstempo/toasim.py b/libstempo/toasim.py
index <HASH>..<HASH> 100644
--- a/libstempo/toasim.py
+++ b/libstempo/toasim.py
@@ -690,6 +690,7 @@ def createGWB(psr, Amp, gam, noCorr=False, seed=None, turnover=False,
     :param f0: Frequency of spectrum turnover
     :param beta: Spectral index of power spectram for f << f0
     :param power: Fudge factor for flatness of spectrum turnover
+    :param userSpec: User-supplied characteristic strain spectrum
     :param npts: Number of points used in interpolation
     :param howml: Lowest frequency is 1/(howml * T)
added option in createGWB to supply custom strain spectrum
py
diff --git a/tests/test_orbit.py b/tests/test_orbit.py
index <HASH>..<HASH> 100644
--- a/tests/test_orbit.py
+++ b/tests/test_orbit.py
@@ -4823,9 +4823,9 @@ def test_from_name_values():
         "DEC of [BGK2006] HV 5 does not match SIMBAD value"
     assert numpy.isclose(o.dist(), 55.), \
         "Parallax of [BGK2006] HV 5 does not match SIMBAD value"
-    assert numpy.isclose(o.pmra(), -0.023), \
+    assert numpy.isclose(o.pmra(), 0.001), \
         "PMRA of [BGK2006] HV 5 does not match SIMBAD value"
-    assert numpy.isclose(o.pmdec(), -1.179), \
+    assert numpy.isclose(o.pmdec(), -0.989), \
         "PMDec of [BGK2006] HV 5 does not match SIMBAD value"
     assert numpy.isclose(o.vlos(), 553.), \
         "radial velocity of [BGK2006] HV 5 does not match SIMBAD value"
Update proper motion of hypervelocity star [BGK<I>] HV 5 used in tests to Gaia EDR3 value
py
diff --git a/marshmallow/fields.py b/marshmallow/fields.py
index <HASH>..<HASH> 100644
--- a/marshmallow/fields.py
+++ b/marshmallow/fields.py
@@ -695,7 +695,7 @@ class Number(Field):
         try:
             ret = self._format_num(value)
             if self.as_string:
-                return text_type(ret)
+                return str(ret)
             else:
                 return ret
         except (TypeError, ValueError) as err:
Fix `as_string` behavior on py2
py
diff --git a/tests/linalg_test.py b/tests/linalg_test.py
index <HASH>..<HASH> 100644
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -719,7 +719,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):
     # TODO(phawkins): 1e-1 seems like a very loose tolerance.
     jtu.check_grads(np.linalg.pinv, args_maker(), 2, rtol=1e-1)

-
+  @jtu.skip_on_devices("tpu")  # SVD is not implemented on the TPU backend
   def testPinvGradIssue2792(self):
     def f(p):
       a = np.array([[0., 0.],[-p, 1.]], np.float32) * 1 / (1 + p**2)
skip pinv test on tpu because no svd
py
diff --git a/coaster/utils/misc.py b/coaster/utils/misc.py
index <HASH>..<HASH> 100644
--- a/coaster/utils/misc.py
+++ b/coaster/utils/misc.py
@@ -254,15 +254,21 @@ def make_name(text, delim=u'-', maxlength=50, checkused=None, counter=2):
     'lankaran'
     >>> make_name(u'[email protected]')
     'example-example-com'
+    >>> make_name('trailing-delimiter', maxlength=10)
+    'trailing-d'
+    >>> make_name('trailing-delimiter', maxlength=9)
+    'trailing'
     """
     name = six.text_type(delim.join([_strip_re.sub('', x) for x in _punctuation_re.split(text.lower()) if x != '']))
     name = unidecode(name).replace('@', 'a')  # We don't know why unidecode uses '@' for 'a'-like chars
     if isinstance(text, six.text_type):
         # Unidecode returns str. Restore to a unicode string if original was unicode
         name = six.text_type(name)
-    if checkused is None:
-        return name[:maxlength]
     candidate = name[:maxlength]
+    if candidate.endswith(delim):
+        candidate = candidate[:-1]
+    if checkused is None:
+        return candidate
     existing = checkused(candidate)
     while existing:
         candidate = name[:maxlength - len(str(counter))] + str(counter)
Avoid a trailing delimiter in make_name

Resolves hasgeek/funnel#<I>
py
diff --git a/src/hamster/storage/db.py b/src/hamster/storage/db.py
index <HASH>..<HASH> 100644
--- a/src/hamster/storage/db.py
+++ b/src/hamster/storage/db.py
@@ -134,7 +134,8 @@ class Storage(storage.Storage):
             # running as flask app. XXX - detangle
             data_dir = os.path.join(module_dir, "data")
         else:
-            data_dir = os.path.join(module_dir, '..', '..', 'data')
+            # get ./data from ./src/hamster/storage/db.py (3 levels up)
+            data_dir = os.path.join(module_dir, '..', '..', '..', 'data')

         data_dir = os.path.realpath(data_dir)
look for ../../../data/ instead of ../../data/ (#<I>)

Fix issue <I>: src/hamster-service & ... FileNotFoundError: [Errno 2]
No such file or directory when there is no hamster.db.
py
diff --git a/ga4gh/datarepo.py b/ga4gh/datarepo.py
index <HASH>..<HASH> 100644
--- a/ga4gh/datarepo.py
+++ b/ga4gh/datarepo.py
@@ -245,6 +245,17 @@ class AbstractDataRepository(object):
                     phenotypeAssociationSet.getParentContainer().getId(),
                     sep="\t")
                 # TODO - please improve this listing
+            print("\tRnaQuantificationSets:")
+            for rna_quantification_set in dataset.getRnaQuantificationSets():
+                print(
+                    "\t", rna_quantification_set.getLocalId(),
+                    rna_quantification_set.getId(), sep="\t")
+                for quant in rna_quantification_set.getRnaQuantifications():
+                    print(
+                        "\t\t", quant.getLocalId(),
+                        quant._description,
+                        quant._readGroupIds[0],
+                        quant._featureSetIds[0], sep="\t")

     def allReferences(self):
         """
Adds prints of rna information to the list command
py
diff --git a/txkoji/multicall.py b/txkoji/multicall.py
index <HASH>..<HASH> 100644
--- a/txkoji/multicall.py
+++ b/txkoji/multicall.py
@@ -118,6 +118,6 @@ class KojiMultiCallIterator(MultiCallIterator):
             return item
         if isinstance(value, list):
             # Do this same rich item conversion for list of Munch objects
-            items_list = [self.rich_item(item) for item in value]
+            items_list = [self.rich_item(val) for val in value]
             return items_list
         return value
multicall: rename list element variable

Some versions of flake8 have a problem with re-using the "item"
variable name here:

    F<I> list comprehension redefines 'item' from line <I>

Change it to something else to satisfy flake8.
py
diff --git a/src/armet/resources/base.py b/src/armet/resources/base.py
index <HASH>..<HASH> 100644
--- a/src/armet/resources/base.py
+++ b/src/armet/resources/base.py
@@ -860,11 +860,19 @@ class BaseResource(object):
         # slice off the site prefix if one exists.
         # Will replace "/" with "/" if that's the prefix.
         stripped = url.replace(urlresolvers.get_script_prefix(), '/')
-        # Get the actual resource.
-        resolved = urlresolvers.resolve(stripped)
+
+        try:
+            # Get the actual resource.
+            resolved = urlresolvers.resolve(stripped)
+
+        except urlresolvers.Resolver404:
+            # Raise a normal exception here.
+            raise ValueError('No resolution found.')
+
         # Rip out the class and kwargs from it.
         klass = resolved.func.__self__
         kw = resolved.kwargs
+
         # Instantiate and read that class,
         # returning whatever object is at that resource.
         obj = klass(request=self.request, **kw)
Resolver raises value error if it can't find a match.
py
diff --git a/astrodbkit/astrodb.py b/astrodbkit/astrodb.py
index <HASH>..<HASH> 100755
--- a/astrodbkit/astrodb.py
+++ b/astrodbkit/astrodb.py
@@ -142,7 +142,7 @@ class Database:
                 try:
                     temp = data[col].astype(new_records[col].dtype)
                     data.replace_column(col, temp)
-                except KeyError:
+                except (KeyError,AttributeError):
                     continue

         # If a row contains photometry for multiple bands, use the *multiband argument and execute this
Added AttributeError to try/except statement in db.add_data() method
py
diff --git a/synapse/tests/test_model_inet.py b/synapse/tests/test_model_inet.py
index <HASH>..<HASH> 100644
--- a/synapse/tests/test_model_inet.py
+++ b/synapse/tests/test_model_inet.py
@@ -1487,7 +1487,13 @@ class InetModelTest(SynTest):
             self.eq(valu, '::ffff:1.2.3.4')
             self.eq(subs.get('ipv4'), 0x01020304)

-            #self.nn(core.getTufoByProp('inet:addr:ipv4', '1.2.3.4'))
+            nv, nsubs = core.getTypeNorm('inet:addr', '::ffff:1.2.3.4')
+            self.eq(valu, nv)
+            self.eq(subs, nsubs)
+
+            # These change when we move to using inet:addr instead of
+            self.raises(NoSuchForm, core.formTufoByProp, 'inet:addr', 0x01020304)
+            # self.nn(core.getTufoByProp('inet:addr:ipv4', '1.2.3.4'))

     def test_model_inet_wifi(self):
         with self.getRamCore() as core:
Add test to ensure that if we re-norm a value we get the same subs and valu.
py
diff --git a/cirq/study/result.py b/cirq/study/result.py
index <HASH>..<HASH> 100644
--- a/cirq/study/result.py
+++ b/cirq/study/result.py
@@ -150,7 +150,8 @@ class Result:

     @property
     def repetitions(self) -> int:
-        return self.data.shape[0]
+        # Get the length quickly from one of the keyed results.
+        return len(next(iter(self.measurements.values())))

     # Reason for 'type: ignore': https://github.com/python/mypy/issues/5273
     def multi_measurement_histogram(  # type: ignore
Fix Result constructing a pandas dataframe to compute repetitions (#<I>)

- This is really really expensive compared to looking up the value more
  directly like this
py
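The replacement avoids building a pandas DataFrame just to read its row count. A standalone sketch of the same O(1) lookup; the dict and shapes here are illustrative, not cirq code:

# every measurement key maps to an array of shape (repetitions, num_qubits),
# so the length of any single entry is the repetition count
measurements = {"m0": [[0, 1]] * 1000, "m1": [[1]] * 1000}
repetitions = len(next(iter(measurements.values())))
assert repetitions == 1000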
diff --git a/netjsonconfig/schema.py b/netjsonconfig/schema.py
index <HASH>..<HASH> 100644
--- a/netjsonconfig/schema.py
+++ b/netjsonconfig/schema.py
@@ -286,6 +286,8 @@ schema = {
             "properties": {
                 "hostname": {
                     "type": "string",
+                    "maxLength": 63,
+                    "minLength": 1,
                     "propertyOrder": 1,
                 },
                 "maintainer": {
[schema] Added minlength and maxlength to hostname
py
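A quick way to see the effect of the new bounds, validating just the hostname fragment; the use of the jsonschema package here is an assumption for illustration, not part of the commit:

from jsonschema import ValidationError, validate

# mirrors the fragment in the diff above
fragment = {"type": "string", "minLength": 1, "maxLength": 63}

validate("router-1", fragment)   # a valid hostname passes
for bad in ["", "x" * 64]:       # empty or >63 characters now fails
    try:
        validate(bad, fragment)
    except ValidationError as err:
        print(err.message)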
diff --git a/lib/svtplay_dl/service/nrk.py b/lib/svtplay_dl/service/nrk.py
index <HASH>..<HASH> 100644
--- a/lib/svtplay_dl/service/nrk.py
+++ b/lib/svtplay_dl/service/nrk.py
@@ -13,7 +13,7 @@ from svtplay_dl.subtitle import subtitle
 from svtplay_dl.log import log

 class Nrk(Service, OpenGraphThumbMixin):
-    supported_domains = ['nrk.no', 'tv.nrk.no']
+    supported_domains = ['nrk.no', 'tv.nrk.no', 'p3.no']

     def get(self, options):
         data = self.get_urldata()
nrk: support for p3.no
py
diff --git a/releases/__init__.py b/releases/__init__.py
index <HASH>..<HASH> 100644
--- a/releases/__init__.py
+++ b/releases/__init__.py
@@ -641,15 +641,6 @@ def setup(app):
         app.add_config_value(
             name="releases_{}".format(key), default=default, rebuild="html"
         )
-    # if a string is given for `document_name`, convert it to a list
-    # done to maintain backwards compatibility
-    # https://stackoverflow.com/questions/1303243/how-to-find-out-if-a-python-object-is-a-string
-    PY2 = sys.version_info[0] == 2
-    if PY2:
-        string_types = (basestring,)
-    else:
-        string_types = (str,)
-
     if isinstance(app.config.releases_document_name, six.string_types):
         app.config.releases_document_name = [app.config.releases_document_name]
Never actually nixed this when moving to using six
py
diff --git a/synapse/axon.py b/synapse/axon.py
index <HASH>..<HASH> 100644
--- a/synapse/axon.py
+++ b/synapse/axon.py
@@ -186,7 +186,7 @@ class BlobCell(s_neuron.Cell):
     def postCell(self):

         if self.neuraddr is None:
-            raise Exception('BlobCell requires a neuron')
+            raise s_common.BadConfValu(mesg='BlobCell requires a neuron')

         path = self.getCellDir('blobs.lmdb')
         mapsize = self.getConfOpt('blob:mapsize')
@@ -322,7 +322,7 @@ class AxonCell(s_neuron.Cell):
     def postCell(self):

         if self.cellpool is None:
-            raise Exception('AxonCell requires a neuron and CellPool')
+            raise s_common.BadConfValu(mesg='AxonCell requires a neuron and CellPool')

         mapsize = self.getConfOpt('axon:mapsize')
Replace raise Exceptions with raising a specific exception
py
diff --git a/pysoa/server/schemas.py b/pysoa/server/schemas.py
index <HASH>..<HASH> 100644
--- a/pysoa/server/schemas.py
+++ b/pysoa/server/schemas.py
@@ -30,5 +30,6 @@ JobRequestSchema = Dictionary(
         'control': ControlHeaderSchema,
         'context': SchemalessDictionary(key_type=UnicodeString()),
         'actions': List(ActionRequestSchema),
-    }
+    },
+    optional_keys=['context'],
 )
Make context optional

Summary: It was being required before.

Test Plan: Manually tested

Reviewers: #foundry, seth

Reviewed By: #foundry, seth

Subscribers: seth, michaelmanganiello

Differential Revision: <URL>
py
diff --git a/h2o-py/tests/testdir_misc/pyunit_check_strict.py b/h2o-py/tests/testdir_misc/pyunit_check_strict.py
index <HASH>..<HASH> 100644
--- a/h2o-py/tests/testdir_misc/pyunit_check_strict.py
+++ b/h2o-py/tests/testdir_misc/pyunit_check_strict.py
@@ -6,7 +6,14 @@ from tests import pyunit_utils


 def check_strict():
-    # inspection doesn't work with decorated functions.
+    # We may be either connected to an existing h2o server, or not. If we are, then discover the connection settings
+    # so that we don't have to start a new server (starting a new server may be not possible if h2o.jar is located in
+    # some unknown to us place in the system).
+    hc = h2o.connection()
+    url = None
+    if hc is not None:
+        url = hc.base_url
+
     out = {"version_check_called": False}
     def tracefunc(frame, event, arg):
         if frame.f_code.co_name == "version_check":
@@ -14,7 +21,7 @@ def check_strict():
         return None
     sys.settrace(tracefunc)
     try:
-        h2o.init()
+        h2o.init(url=url)
     except h2o.H2OConnectionError:
         pass
Fixing test pyunit_check_strict, which wasn't able to run in an environment where the location of the h2o.jar executable is not known to the script
py
diff --git a/rollbar/__init__.py b/rollbar/__init__.py
index <HASH>..<HASH> 100644
--- a/rollbar/__init__.py
+++ b/rollbar/__init__.py
@@ -935,7 +935,7 @@ def _build_person_data(request):
     if StarletteRequest:
         from rollbar.contrib.starlette.requests import hasuser
     else:
-        hasuser = lambda request: False
+        hasuser = lambda request: True

     if hasuser(request) and hasattr(request, 'user'):
         user_prop = request.user
fix #<I> breaking collection of user information on *not* starlette

The change fixes the check to match the previous code:
`if not StarletteRequest and hasattr(request, 'user'):`
py
diff --git a/symbols/call.py b/symbols/call.py
index <HASH>..<HASH> 100644
--- a/symbols/call.py
+++ b/symbols/call.py
@@ -33,7 +33,7 @@ class SymbolCALL(Symbol):
         lineno: source code line where this call was made
     """

-    def __init__(self, entry, arglist, lineno):
+    def __init__(self, entry: SymbolFUNCTION, arglist, lineno):
         super(SymbolCALL, self).__init__()
         assert isinstance(lineno, int)
         assert all(isinstance(x, SymbolARGUMENT) for x in arglist)
@@ -41,6 +41,11 @@ class SymbolCALL(Symbol):
         self.args = arglist  # Func. call / array access
         self.lineno = lineno

+        if entry.token == 'FUNCTION':
+            for arg, param in zip(arglist, entry.params):  # Sets dependency graph for each argument -> parameter
+                if arg.value is not None:
+                    arg.value.add_required_symbol(param)
+
     @property
     def entry(self):
         return self.children[0]
Recalculate bound dependency upon instantiation

When instantiating the CALL class, it will also add the params of the
function as `required`, which will allow us, later, to update the
LBound / UBound dependency.
py
diff --git a/fermipy/tests/test_gtanalysis.py b/fermipy/tests/test_gtanalysis.py
index <HASH>..<HASH> 100644
--- a/fermipy/tests/test_gtanalysis.py
+++ b/fermipy/tests/test_gtanalysis.py
@@ -169,6 +169,7 @@ def test_gtanalysis_residmap(create_draco_analysis):
     gta.residmap(model={}, make_plots=True)


+@requires_git_version('02-00-00')
 def test_gtanalysis_find_sources(create_draco_analysis):
     gta = create_draco_analysis
     gta.load_roi('fit1')
@@ -278,7 +279,7 @@ def test_gtanalysis_extension_gaussian(create_draco_analysis):

     gta.simulate_roi(restore=True)

-
+@requires_git_version('02-00-00')
 def test_gtanalysis_localization(create_draco_analysis):
     gta = create_draco_analysis
     gta.simulate_roi(restore=True)
Skip two failed tests for now to switch to new test system
py
diff --git a/docs/source/conf.py b/docs/source/conf.py
index <HASH>..<HASH> 100755
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -57,7 +57,7 @@ source_suffix = '.rst'
 master_doc = 'index'

 # General information about the project.
-project = u'aiida_nwchem'
+project = u'aiida-nwchem'
 copyright_first_year = 2014
 copyright_owners = "ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland"
small change in conf.py
py
diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py
index <HASH>..<HASH> 100644
--- a/troposphere/elasticsearch.py
+++ b/troposphere/elasticsearch.py
@@ -61,7 +61,7 @@ class ElasticsearchDomain(AWSObject):
     props = {
         'AccessPolicies': (policytypes, False),
         'AdvancedOptions': (dict, False),
-        'DomainName': (basestring, True),
+        'DomainName': (basestring, False),
         'EBSOptions': (EBSOptions, False),
         'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
         'SnapshotOptions': (SnapshotOptions, False),
DomainName isn't a required parameter (#<I>)
py
diff --git a/maintenancemode/views.py b/maintenancemode/views.py
index <HASH>..<HASH> 100644
--- a/maintenancemode/views.py
+++ b/maintenancemode/views.py
@@ -1,6 +1,15 @@
 # -*- coding: utf-8 -*-
-from django.template import RequestContext, loader
+import django
+
+if django.get_version() >= '1.8':
+    from django.template.loader import render_to_string
+else:
+    from django.template import loader, RequestContext
+
+    def render_to_string(template_name, context=None, request=None):
+        context_instance = RequestContext(request) if request else None
+        return loader.render_to_string(template_name, context, context_instance)

 from . import http

@@ -17,9 +26,8 @@ def temporary_unavailable(request, template_name='503.html'):
         The path of the requested URL (e.g., '/app/pages/bad_page/')

     """
-    t = loader.get_template(template_name)  # You need to have a 503.html template.
-    context = RequestContext(request, {
+    context = {
         'request_path': request.path,
-    })
-
-    return http.HttpResponseTemporaryUnavailable(t.render(context))
+    }
+    return http.HttpResponseTemporaryUnavailable(
+        render_to_string(template_name, context))
fixed "RemovedInDjango<I>Warning: render() must be called with a dict, not a RequestContext..."
py
diff --git a/analysis/old_isochrone.py b/analysis/old_isochrone.py
index <HASH>..<HASH> 100644
--- a/analysis/old_isochrone.py
+++ b/analysis/old_isochrone.py
@@ -46,6 +46,13 @@ from ugali.utils.config import Config
 from ugali.utils.logger import logger

 ############################################################

+import warnings
+warnings.simplefilter('module', DeprecationWarning)
+msg = "Old isochrone is deprecated"
+warnings.warn(msg,DeprecationWarning)
+warnings.simplefilter('default', DeprecationWarning)
+
+
 class Isochrone(Model):

     _params = odict([
Added deprecation warning to 'old_isochrone.py'
py
diff --git a/zhaquirks/tuya/ts0044.py b/zhaquirks/tuya/ts0044.py
index <HASH>..<HASH> 100644
--- a/zhaquirks/tuya/ts0044.py
+++ b/zhaquirks/tuya/ts0044.py
@@ -145,7 +145,7 @@ class ZemiSmartRemote0044(CustomDevice, Tuya4ButtonTriggers):
         # SizePrefixedSimpleDescriptor(endpoint=2, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
         # SizePrefixedSimpleDescriptor(endpoint=3, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
         # SizePrefixedSimpleDescriptor(endpoint=4, profile=260, device_type=0, device_version=1, input_clusters=[1, 6], output_clusters=[])
-        MODELS_INFO: [("_TZ3000_vp6clf9d", "TS0044")],
+        MODELS_INFO: [("_TZ3000_vp6clf9d", "TS0044"), ("_TZ3000_abci1hiu", "TS0044")],
         ENDPOINTS: {
             1: {
                 PROFILE_ID: zha.PROFILE_ID,
Update of Tuya Wireless Scene Remote TS<I> for _TZ<I>_abci1hiu (#<I>)

Update ts<I>.py
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@ from pyfakefs.fake_filesystem import __version__

 import os
+import setuptools

 NAME = 'pyfakefs'
@@ -76,7 +77,7 @@ touching the real disk.  The software under test requires no modification to
 work with pyfakefs.''',  # LONG_DESCRIPTION,
     keywords=KEYWORDS,
     url=URL,
-    pac=CLASSIFIERS,
+    classifiers=CLASSIFIERS,
     packages=[
         'pyfakefs'
     ]
Correct categories arg in setup.py
py
diff --git a/yabt/dot_test.py b/yabt/dot_test.py
index <HASH>..<HASH> 100644
--- a/yabt/dot_test.py
+++ b/yabt/dot_test.py
@@ -82,9 +82,8 @@ def test_no_buildenv_deps_in_dot(basic_conf):
     build_context = BuildContext(basic_conf)
     basic_conf.targets = ['hello:hello-app']
     populate_targets_graph(build_context, basic_conf)
-    buildenv_targets = {':builder', ':ubuntu-gpg', ':clang', ':ubuntu',
-                        ':gnupg'}
-    expected_targets = {'hello:hello-app', 'hello:hello'}
+    buildenv_targets = {':builder', ':ubuntu-gpg', ':clang', ':gnupg'}
+    expected_targets = {'hello:hello-app', 'hello:hello', ':ubuntu'}
    with io.StringIO() as dot_io:
         write_dot(build_context, basic_conf, dot_io)
         all_targets = set(dot_io.getvalue().split('"'))
Since we now show deps that not only the buildenv depends on, ubuntu should be in the test case.
py
diff --git a/galpy/potential_src/TwoPowerSphericalPotential.py b/galpy/potential_src/TwoPowerSphericalPotential.py
index <HASH>..<HASH> 100644
--- a/galpy/potential_src/TwoPowerSphericalPotential.py
+++ b/galpy/potential_src/TwoPowerSphericalPotential.py
@@ -15,7 +15,7 @@ class TwoPowerSphericalPotential(Potential):
     """Class that implements spherical potentials that are derived from
     two-power density models

-                                        A
+                        A / (4 pi a^3)
     rho(r)= ------------------------------------
              (r/a)^\alpha (1+r/a)^(\beta-\alpha)
     """
Update comments on TwoPowerSphericalPotential, fixes #<I>
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,8 +2,8 @@ from setuptools import setup, find_packages

 setup(
     name='flask_nemo',
-    version="0.0.2",
-    packages = find_packages(exclude=["examples"]),
+    version="0.0.3",
+    packages=find_packages(exclude=["examples"]),
     url='https://github.com/capitains/flask-capitains-nemo',
     license='GNU GPL',
     author='Thibault Clerice',
Release <I> : accept MyCapytain based endpoint object as api for calls
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@ requirements = ["IBMQuantumExperience>=1.8",

 setup(
     name="qiskit",
-    version="0.3.7",
+    version="0.3.8",
     description="Software for developing quantum computing programs",
     long_description="""QISKit is a software development kit for
         writing quantum computing experiments, programs, and applications. Works with
         Python 3.5 and 3.6""",
pip release: <I>
py
diff --git a/src/rituals/invoke_tasks.py b/src/rituals/invoke_tasks.py
index <HASH>..<HASH> 100644
--- a/src/rituals/invoke_tasks.py
+++ b/src/rituals/invoke_tasks.py
@@ -264,7 +264,7 @@ def release_prep(commit=True):
                 handle.write(''.join(data))
             scm.add_file('setup.cfg')
         elif changed:
-            notify.warning("WOULD rewrite 'setup.cfg'")
+            notify.warning("WOULD rewrite 'setup.cfg', but --no-commit was passed")
         else:
             notify.warning("Cannot rewrite 'setup.cfg', none found!")
:zzz: mention --no-commit in warning
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -49,7 +49,9 @@ else:

 def version_to_int(version):
     version = re.search(r'((?:\d+\.)+\d+)', version).group()
-    return int(''.join(version.split('.')), 16)
+    # Split the groups on ".", take only the first one, and print each group with leading 0 if needed
+    version_str = "{:02}{:02}{:02}".format(*map(int, version.split('.')[:3]))
+    return int(version_str, 16)


 def package_config():
Fix version handling for new versioning scheme of tesseract
py
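A worked example of why the old encoding broke ordering once version components gained a second digit; values computed from the two schemes shown in the diff:

# old scheme: int("".join(version.split(".")), 16)
int("30502", 16)   # 197890 for "3.05.02"
int("400", 16)     # 1024 for "4.0.0" -> 3.05.02 would sort *above* 4.0.0

# new scheme: zero-pad each component to two hex digits
int("030502", 16)  # 197890
int("040000", 16)  # 262144 -> the ordering is correct again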
diff --git a/kconfiglib.py b/kconfiglib.py
index <HASH>..<HASH> 100644
--- a/kconfiglib.py
+++ b/kconfiglib.py
@@ -1033,6 +1033,9 @@ class Kconfig(object):
         self._load_old_vals()

         for sym in self.defined_syms:
+            # Note: _write_to_conf is determined when the value is
+            # calculated. This is a hidden function call due to
+            # property magic.
             val = sym.str_value

             # Note: n tristate values do not get written to auto.conf and
@@ -1041,12 +1044,12 @@ class Kconfig(object):
             if sym._write_to_conf:
                 if sym._old_val is None and \
                    sym.orig_type in (BOOL, TRISTATE) and \
-                   not sym.tri_value:
+                   val == "n":
                     # No old value (the symbol was missing or n), new value n.
                     # No change.
                     continue

-                if sym.str_value == sym._old_val:
+                if val == sym._old_val:
                     # New value matches old. No change.
                     continue
Simplify sync_deps() value tests

Use the result from the initial str_value call in more places. Also add
a comment to make it clear how _write_to_conf is calculated, mirroring
the ones in write_config() and write_autoconf().
py
diff --git a/flask_appbuilder/models/base.py b/flask_appbuilder/models/base.py
index <HASH>..<HASH> 100644
--- a/flask_appbuilder/models/base.py
+++ b/flask_appbuilder/models/base.py
@@ -35,12 +35,15 @@ class BaseInterface(object):
     def _get_attr_value(self, item, col):
         if not hasattr(item, col):
             # it's an inner obj attr
-            return reduce(getattr, col.split('.'), item)
+            try:
+                return reduce(getattr, col.split('.'), item)
+            except Exception as e:
+                return ''
         if hasattr(getattr(item, col), '__call__'):
             # its a function
             return getattr(item, col)()
         else:
-            # its attribute
+            # its an attribute
             return getattr(item, col)

     def get_filters(self, search_columns=None):
Fixes unhandled exception when related fields on list are null (removed from the DB, sqlite no constraints)
py
diff --git a/insights/combiners/dmesg.py b/insights/combiners/dmesg.py
index <HASH>..<HASH> 100644
--- a/insights/combiners/dmesg.py
+++ b/insights/combiners/dmesg.py
@@ -43,9 +43,13 @@ Examples:
     False
 """

+from insights.core.filters import add_filter
 from insights.core.plugins import combiner
 from insights.parsers.dmesg import DmesgLineList
 from insights.parsers.dmesg_log import DmesgLog
+from insights.specs import Specs
+
+add_filter(Specs.dmesg, 'Linux version')


 @combiner([DmesgLineList, DmesgLog])
Add filter for dmesg combiner (#<I>)
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
 from setuptools import setup, find_packages
 import os

-version = '0.10'
+version = '2.0dev1'

 here = os.path.abspath(os.path.dirname(__file__))
 README = open(os.path.join(here, 'README.rst')).read()
Change version to <I>dev1
py
diff --git a/pythran/analyses/lazyness_analysis.py b/pythran/analyses/lazyness_analysis.py
index <HASH>..<HASH> 100644
--- a/pythran/analyses/lazyness_analysis.py
+++ b/pythran/analyses/lazyness_analysis.py
@@ -325,7 +325,7 @@ class LazynessAnalysis(FunctionAnalysis):
         if isinstance(fun, ast.Call):
             # call to partial functions
             self.func_args_lazyness(fun.args[0], fun.args[1:] + args, node)
         elif fun in self.argument_effects:
-            # when there is an argument effet, we apply "modify" to the arg
+            # when there is an argument effect, apply "modify" to the arg
             for i, arg in enumerate(self.argument_effects[fun]):
                 # check len of args as default is 11 args
                 if arg and len(args) > i:
@@ -336,7 +336,9 @@ class LazynessAnalysis(FunctionAnalysis):
                 # correctly thanks to aliasing
                 continue
         else:
-            raise PythranSyntaxError("Bad call in LazynessAnalysis", node)
+            # conservative choice
+            for arg in args:
+                self.modify(arg, node)

     def visit_Call(self, node):
         """
Avoid raising in uncomfortable situation during lazyness analysis

Better make a conservative choice instead!
py
diff --git a/pymc/tests/test_missing.py b/pymc/tests/test_missing.py
index <HASH>..<HASH> 100644
--- a/pymc/tests/test_missing.py
+++ b/pymc/tests/test_missing.py
@@ -16,6 +16,7 @@ import aesara
 import numpy as np
 import pandas as pd
 import pytest
+import scipy.stats

 from numpy import array, ma

@@ -188,3 +189,21 @@ def test_missing_multivariate():
     #     m_miss.logp({"x_missing_simplex__": inp_vals}),
     #     m_unobs.logp_nojac({"x_simplex__": inp_vals}) * 2,
     # )
+
+
+def test_missing_vector_parameter():
+    with Model() as m:
+        x = Normal(
+            "x",
+            np.array([-10, 10]),
+            0.1,
+            observed=np.array([[np.nan, 10], [-10, np.nan], [np.nan, np.nan]]),
+        )
+    x_draws = x.eval()
+    assert x_draws.shape == (3, 2)
+    assert np.all(x_draws[:, 0] < 0)
+    assert np.all(x_draws[:, 1] > 0)
+    assert np.isclose(
+        m.logp({"x_missing": np.array([-10, 10, -10, 10])}),
+        scipy.stats.norm(scale=0.1).logpdf(0) * 6,
+    )
Test that missing values work as expected in distribution with vector parameters
py
diff --git a/fusesoc/coremanager.py b/fusesoc/coremanager.py
index <HASH>..<HASH> 100644
--- a/fusesoc/coremanager.py
+++ b/fusesoc/coremanager.py
@@ -59,12 +59,12 @@ class CoreManager(object):
                 abspath = os.path.abspath(p)
                 if not abspath in self._cores_root:
                     self._cores_root += [abspath]
-                    self.load_cores(p)
+                    self.load_cores(os.path.expanduser(p))
         else:
             abspath = os.path.abspath(path)
             if not abspath in self._cores_root:
                 self._cores_root += [abspath]
-                self.load_cores(path)
+                self.load_cores(os.path.expanduser(path))

     def get_cores_root(self):
         return self._cores_root
coremanager.py: Allow ~ in cores_root in fusesoc.conf
py
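For context, os.path.expanduser is what turns the ~ shorthand from fusesoc.conf into an absolute home path; a one-line illustration (the path is hypothetical):

import os.path

os.path.expanduser("~/cores")  # e.g. "/home/alice/cores"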
diff --git a/spyder/plugins/variableexplorer.py b/spyder/plugins/variableexplorer.py
index <HASH>..<HASH> 100644
--- a/spyder/plugins/variableexplorer.py
+++ b/spyder/plugins/variableexplorer.py
@@ -60,8 +60,8 @@ class VariableExplorer(QWidget, SpyderPluginMixin):
     """
     CONF_SECTION = 'variable_explorer'
     CONFIGWIDGET_CLASS = VariableExplorerConfigPage
-    INITIAL_FREE_MEMORY_TIME_TRIGGER = 60000
-    SECONDARY_FREE_MEMORY_TIME_TRIGGER = 300000
+    INITIAL_FREE_MEMORY_TIME_TRIGGER = 60 * 1000  # ms
+    SECONDARY_FREE_MEMORY_TIME_TRIGGER = 180 * 1000  # ms
     sig_option_changed = Signal(str, object)

     def __init__(self, parent):
Change time format and value for the timers.
py
diff --git a/lib/config.py b/lib/config.py
index <HASH>..<HASH> 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -28,7 +28,13 @@ import numpy as np
 import matplotlib
 from matplotlib.font_manager import FontProperties
 from matplotlib import rcParams
-from cycler import cycler
+HAS_CYCLER = False
+try:
+    from cycler import cycler
+    HAS_CYCLER = True
+except ImportError:
+    pass
+
 from . import colors

 # use ordered dictionary to control order displayed in GUI dropdown lists
@@ -45,7 +51,8 @@ LineColors = ('#1f77b4', '#d62728', '#2ca02c', '#ff7f0e', '#9467bd',
               '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf')

-rcParams['axes.prop_cycle'] = cycler('color', LineColors)
+if HAS_CYCLER and matplotlib.__version__ > '1.9':
+    rcParams['axes.prop_cycle'] = cycler('color', LineColors)

 for k in ('default', 'steps-pre','steps-mid', 'steps-post'):
     DrawStyleMap[k] = k
test import and matplotlib version for cycler
py
diff --git a/openquake/risklib/asset.py b/openquake/risklib/asset.py
index <HASH>..<HASH> 100644
--- a/openquake/risklib/asset.py
+++ b/openquake/risklib/asset.py
@@ -808,13 +808,13 @@ class Exposure(object):
         if self.occupancy_periods:
             fields.extend(self.occupancy_periods.split())
         fields.extend(self.tagcol.tagnames)
-        return set(fields)
+        return fields

     def _read_csv(self):
         """
         :yields: asset nodes
         """
-        expected_header = self._csv_header()
+        expected_header = set(self._csv_header())
         for fname in self.datafiles:
             with open(fname, encoding='utf-8') as f:
                 fields = next(csv.reader(f))
Cleanup [skip CI]
py
diff --git a/spadespipeline/typingclasses.py b/spadespipeline/typingclasses.py
index <HASH>..<HASH> 100644
--- a/spadespipeline/typingclasses.py
+++ b/spadespipeline/typingclasses.py
@@ -750,6 +750,11 @@ class ResFinder(GeneSeekr):
                     data.append(result['query_id'])
                     data.append('...'.join([str(result['low']), str(result['high'])]))
                     try:
+                        # Populate the attribute storing the resfinder results
+                        sample[self.analysistype].pipelineresults.append(
+                            '{rgene} ({pid}%) {rclass}'.format(rgene=finalgene,
+                                                               pid=percentid,
+                                                               rclass=resistance))
                         # Only if the alignment option is selected, for inexact results, add alignments
                         if self.align and percentid != 100.00:
@@ -768,11 +773,6 @@ class ResFinder(GeneSeekr):
                             record = SeqRecord(sample[self.analysistype].dnaseq[name],
                                                id='{}_{}'.format(sample.name, name),
                                                description='')
-                            # Populate the attribute storing the resfinder results
-                            sample[self.analysistype].pipelineresults.append(
-                                '{rgene} ({pid}%) {rclass}'.format(rgene=finalgene,
-                                                                   pid=percentid,
-                                                                   rclass=resistance))
                             # Add the alignment, and the location of mismatches for both nucleotide and amino
                             # acid sequences
                             data.extend([record.format('fasta'),
Ensured that the pipelineresults attribute for resfinder_assembled was being populated
py
diff --git a/fontbakery-check-ttf.py b/fontbakery-check-ttf.py
index <HASH>..<HASH> 100755
--- a/fontbakery-check-ttf.py
+++ b/fontbakery-check-ttf.py
@@ -1146,7 +1146,8 @@ def main():
     check_bit_entry(font, "OS/2", "fsSelection",
                     "Regular" in style or \
                     (style in STYLE_NAMES and
-                     style not in RIBBI_STYLE_NAMES),
+                     style not in RIBBI_STYLE_NAMES and
+                     "Italic" not in style),
                     bitmask=FSSEL_REGULAR,
                     bitname="REGULAR")
Italics should never have fsSelection REGULAR bit set to 1 (fixes issue #<I>)
py
diff --git a/gspread/models.py b/gspread/models.py
index <HASH>..<HASH> 100644
--- a/gspread/models.py
+++ b/gspread/models.py
@@ -414,6 +414,26 @@ class Worksheet(object):
         """
         self.resize(cols=self.col_count + cols)

+    def append_row(self, values):
+        """"Adds a row to the worksheet and populates it with values.
+        Widens the worksheet if there are more values than columns.
+
+        :param values: List of values for the new row.
+        """
+        self.add_rows(1)
+        new_row = self.row_count
+        data_width = len(values)
+        if self.col_count < data_width:
+            self.resize(cols=data_width)
+
+        cell_list = []
+        for i, value in enumerate(values, start=1):
+            cell = self.cell(new_row, i)
+            cell.value = value
+            cell_list.append(cell)
+
+        self.update_cells(cell_list)
+
     def _finder(self, func, query):
         cells = self._fetch_cells()
fixes issue #<I> Feature Request: append_row( [list] )
py
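A hedged usage sketch of the new method; ws is assumed to be a gspread Worksheet obtained from an authorized client, and the row values are illustrative:

# appends one row after the current last row, widening the sheet from
# e.g. 2 to 3 columns if necessary
ws.append_row(["2015-01-01", 42, "ok"])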
diff --git a/auth_backends/__init__.py b/auth_backends/__init__.py
index <HASH>..<HASH> 100644
--- a/auth_backends/__init__.py
+++ b/auth_backends/__init__.py
@@ -3,4 +3,4 @@ These package is designed to be used primarily with Open edX Django projects, but should be compatible
 with non-edX projects as well.
 """

-__version__ = '0.7.0'  # pragma: no cover
+__version__ = '1.0.0'  # pragma: no cover
Updated version to <I>

This package has been production-ready for quite some time. It's time
to make it official!
py
diff --git a/command/bdist_msi.py b/command/bdist_msi.py
index <HASH>..<HASH> 100644
--- a/command/bdist_msi.py
+++ b/command/bdist_msi.py
@@ -141,6 +141,8 @@ class bdist_msi (Command):
         bdist_base = self.get_finalized_command('bdist').bdist_base
         self.bdist_dir = os.path.join(bdist_base, 'msi')
         short_version = get_python_version()
+        if (not self.target_version) and self.distribution.has_ext_modules():
+            self.target_version = short_version
         if self.target_version:
             self.versions = [self.target_version]
             if not self.skip_build and self.distribution.has_ext_modules()\
Issue #<I>: Do not try to build a version-independent installer if the package has extension modules. Also add NEWS entry for #<I>.
py
diff --git a/zubbi/scraper/connections/gerrit.py b/zubbi/scraper/connections/gerrit.py
index <HASH>..<HASH> 100644
--- a/zubbi/scraper/connections/gerrit.py
+++ b/zubbi/scraper/connections/gerrit.py
@@ -67,8 +67,8 @@ class GerritConnection(GitConnection):
     def __init__(
         self,
         url,
-        user,
-        password,
+        user=None,
+        password=None,
         workspace="/tmp/zubbi_working_dir",
         web_type="cgit",
         web_url=None,
@@ -79,9 +79,6 @@ class GerritConnection(GitConnection):
         self.gitweb_url = web_url or url
         self.web_url_builder = self.get_web_url_builder(web_type, web_url, url)

-        self.user = user
-        self.password = password
-
     def init(self):
         LOGGER.info("Initializing Gerrit connection to %s", self.base_url)
         # Currently we don't need to do anything here
Make Gerrit credentials really optional

So far, the credentials were optional in the underlying GitConnection,
but had to be set in the GerritConnection. This change makes them now
really optional.
py
diff --git a/koordinates/tokens.py b/koordinates/tokens.py
index <HASH>..<HASH> 100644
--- a/koordinates/tokens.py
+++ b/koordinates/tokens.py
@@ -68,8 +68,8 @@ class Token(base.Model):

     @is_bound
     def save(self):
-        target_url = self._connection.get_url('TOKEN', 'PUT', 'update', {'id': self.id})
-        r = self._connection.request('PUT', target_url, json=self.serialize())
+        target_url = self._client.get_url('TOKEN', 'PUT', 'update', {'id': self.id})
+        r = self._client.request('PUT', target_url, json=self.serialize())
         return self.deserialize(r.json(), self._manager)
Token catching up with Connection rename
py
diff --git a/salt/modules/saltutil.py b/salt/modules/saltutil.py
index <HASH>..<HASH> 100644
--- a/salt/modules/saltutil.py
+++ b/salt/modules/saltutil.py
@@ -448,7 +448,7 @@ def sync_all(saltenv=None, refresh=True):
         environment

     refresh : True
-        Also refresh the execution modules available to the minion.
+        Also refresh the execution modules and pillar data available to the minion.

         .. important::

@@ -486,6 +486,7 @@ def sync_all(saltenv=None, refresh=True):
     ret['log_handlers'] = sync_log_handlers(saltenv, False)
     if refresh:
         refresh_modules()
+        refresh_pillar()
     return ret
perform `refresh_pillar` as part of `sync_all` when `refresh=True`
py
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,14 +1,16 @@
 from distutils.core import setup

 setup(
     name = 'libpagure',
-    packages = ['libpagure'], # this must be the same as the name above
+    packages = ['libpagure'],
     version = '0.21',
     description = 'A Python library for Pagure APIs.',
     author = 'Lei Yang',
     author_email = '[email protected]',
-    url = 'https://github.com/yangl1996/libpagure', # use the URL to the github repo
-    download_url = 'https://github.com/yangl1996/libpagure/tarball/0.21', # I'll explain this in a second
-    keywords = ['pagure', 'api', 'library'], # arbitrary keywords
-    classifiers = ['Programming Language :: Python'],
-    license = "GNU General Public License v2.0"
-)
\ No newline at end of file
+    url = 'https://github.com/yangl1996/libpagure',
+    download_url = 'https://github.com/yangl1996/libpagure/tarball/0.21',
+    keywords = ['pagure', 'api', 'library'],
+    classifiers = [
+        'Programming Language :: Python',
+    ],
+    license = "GNU General Public License v2.0",
+)
Remove comments in the setup.py
py
diff --git a/mythril/laser/ethereum/natives.py b/mythril/laser/ethereum/natives.py
index <HASH>..<HASH> 100644
--- a/mythril/laser/ethereum/natives.py
+++ b/mythril/laser/ethereum/natives.py
@@ -41,8 +41,10 @@ def ecrecover(data):
     s = extract32(data, 96)
     if r >= secp256k1n or s >= secp256k1n or v < 27 or v > 28:
         return []
-
-    pub = ecrecover_to_pub(message, v, r, s)
+    try:
+        pub = ecrecover_to_pub(message, v, r, s)
+    except ValueError:
+        return []
     o = [0] * 12 + [x for x in sha3(pub)[-20:]]
     return o
Catch ValueError from pyethereum
py
diff --git a/rllib/agents/a3c/a3c_torch_policy.py b/rllib/agents/a3c/a3c_torch_policy.py
index <HASH>..<HASH> 100644
--- a/rllib/agents/a3c/a3c_torch_policy.py
+++ b/rllib/agents/a3c/a3c_torch_policy.py
@@ -64,8 +64,11 @@ def apply_grad_clipping(policy, optimizer, loss):
         params = list(
             filter(lambda p: p.grad is not None, param_group["params"]))
         if params:
-            info["grad_gnorm"] = nn.utils.clip_grad_norm_(
+            grad_gnorm = nn.utils.clip_grad_norm_(
                 params, policy.config["grad_clip"])
+            if isinstance(grad_gnorm, torch.Tensor):
+                grad_gnorm = grad_gnorm.cpu().numpy()
+            info["grad_gnorm"] = grad_gnorm
     return info
[rllib] Do not store torch tensors when using grad clipping (#<I>)
py
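The conversion matters because clip_grad_norm_ returns the total norm as a tensor on recent torch versions, which is not something you want to keep alive in metrics dicts. A minimal standalone check with toy parameters (not RLlib code):

import torch
from torch import nn

p = torch.nn.Parameter(torch.ones(3))
p.grad = torch.ones(3)
gnorm = nn.utils.clip_grad_norm_([p], max_norm=1.0)
if isinstance(gnorm, torch.Tensor):   # older torch versions return a float
    gnorm = gnorm.cpu().numpy()
print(gnorm)  # a plain numpy scalar, safe to store in an info dict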
diff --git a/sos/jupyter/kernel.py b/sos/jupyter/kernel.py
index <HASH>..<HASH> 100644
--- a/sos/jupyter/kernel.py
+++ b/sos/jupyter/kernel.py
@@ -473,6 +473,8 @@ class SoS_Kernel(IPythonKernel):
         for idx,x in enumerate(self._kernel_list):
             if x[1] == name:
                 # if exist language or no new langauge defined.
+                env.logger.error(x)
+                env.logger.error(langauge)
                 if x[2] or language is None:
                     update_existing(idx)
                     return x
Fix a number of small bugs related to #<I>
py
diff --git a/lmnotify/models.py b/lmnotify/models.py
index <HASH>..<HASH> 100644
--- a/lmnotify/models.py
+++ b/lmnotify/models.py
@@ -94,7 +94,7 @@ class Sound(object):
     """
     def __init__(self, category, sound_id, repeat=1):
         assert(
-            (category == "notification" and (sound_id in SOUND_IDS)) or
+            (category == "notifications" and (sound_id in SOUND_IDS)) or
            (category == "alarms" and (sound_id in ALARM_IDS))
         )
         assert(repeat > 0)
Assert had typo

Prevented the use of notification sounds
py
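Usage sketch showing the category string the assert now expects; "cat" is a hypothetical sound id assumed to be present in SOUND_IDS:

from lmnotify.models import Sound

# before the fix, the plural category ("notifications") tripped the
# assert even for valid sound ids
sound = Sound("notifications", "cat", repeat=2)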
diff --git a/plaso/parsers/docker.py b/plaso/parsers/docker.py
index <HASH>..<HASH> 100644
--- a/plaso/parsers/docker.py
+++ b/plaso/parsers/docker.py
@@ -293,7 +293,9 @@ class DockerJSONParser(interface.FileObjectParser):
     split_path = file_system.SplitPath(json_file_path)
     try:
       if 'containers' in split_path:
-        if 'config.json' in split_path:
+        # For our intent, both version of the config file can be parsed
+        # the same way
+        if split_path[-1] in ['config.json', 'config.v2.json']:
          self._ParseContainerConfigJSON(parser_mediator, file_object)
         if json_file_path.endswith('-json.log'):
           self._ParseContainerLogJSON(parser_mediator, file_object)
Changed the filter for config.json so that config.v2.json is also parsed (#<I>)
py
diff --git a/asciimatics/widgets.py b/asciimatics/widgets.py
index <HASH>..<HASH> 100644
--- a/asciimatics/widgets.py
+++ b/asciimatics/widgets.py
@@ -167,7 +167,8 @@ class Frame(Effect):
     within your Frame.
     """

-    # Colour palette for the widgets within the Frame.
+    #: Colour palette for the widgets within the Frame.  Each entry should be
+    #: a 3-tuple of (foreground colour, attribute, background colour).
     palette = {
         "background":
             (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
Add palette to auto-generated docs
py
diff --git a/server/app.py b/server/app.py
index <HASH>..<HASH> 100644
--- a/server/app.py
+++ b/server/app.py
@@ -73,7 +73,7 @@ class AdminOnlyCrypt(BasicAuth):
     acl = {
         'partner': {
             'remotecis': ['GET'],
-            'jobs': ['GET'],
+            'jobs': ['GET', 'POST'],
             'jobstates': ['GET', 'POST']
         }
     }
partners need to be able to create new jobs

Change-Id: I8c2c<I>be1b<I>f<I>f<I>f<I>cf<I>a
py
diff --git a/models.py b/models.py
index <HASH>..<HASH> 100644
--- a/models.py
+++ b/models.py
@@ -11,7 +11,7 @@
 from abstractions import ModelGibbsSampling, ModelMeanField, ModelEM
 from abstractions import Distribution, GibbsSampling, MeanField, Collapsed, MaxLikelihood
 from distributions import Categorical, CategoricalAndConcentration
 from internals.labels import Labels, CRPLabels
-from pyhsmm.util.stats import getdatasize
+from util.stats import getdatasize

 class Mixture(ModelGibbsSampling, ModelMeanField, ModelEM):
removed bad pyhsmm import
py
diff --git a/hgvs/variantmapper.py b/hgvs/variantmapper.py
index <HASH>..<HASH> 100644
--- a/hgvs/variantmapper.py
+++ b/hgvs/variantmapper.py
@@ -8,7 +8,7 @@
 from Bio.Seq import Seq
 from bioutils.sequences import reverse_complement
 import recordtype

-from hgvs.exceptions import HGVSDataNotAvailableError, HGVSUnsupportedOperationError, HGVSInvalidVariantError
+from hgvs.exceptions import HGVSError, HGVSDataNotAvailableError, HGVSUnsupportedOperationError, HGVSInvalidVariantError
 import hgvs
 import hgvs.location
 import hgvs.normalizer
Fix the import of HGVSError in variantmapper
py
diff --git a/plenum/common/messages/fields.py b/plenum/common/messages/fields.py
index <HASH>..<HASH> 100644
--- a/plenum/common/messages/fields.py
+++ b/plenum/common/messages/fields.py
@@ -247,10 +247,13 @@ class MerkleRootField(FieldBase):

 class TimestampField(FieldBase):
     _base_types = (float, int)
+    timeLimit = 253402290000.0

     def _specific_validation(self, val):
-        # TODO finish implementation
-        if val < 0:
-            return 'should be a positive number'
+
+        if 0.0 > float(val) < 1.9:
+            return 'should be a positive number lower then {}, but was {}'\
+                .format(val, val)


 class JsonField(FieldBase):
limit timestamp field by <I>
py
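For reference, the new timeLimit is just short of the largest timestamp a datetime can represent (the end of year 9999); a quick check, assuming UTC:

from datetime import datetime, timezone

print(datetime.fromtimestamp(253402290000.0, tz=timezone.utc))
# 9999-12-31 21:00:00+00:00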
diff --git a/salt/config.py b/salt/config.py
index <HASH>..<HASH> 100644
--- a/salt/config.py
+++ b/salt/config.py
@@ -288,6 +288,7 @@ def master_config(path):
         'renderer': 'yaml_jinja',
         'failhard': False,
         'state_top': 'top.sls',
+        'master_tops': {},
         'external_nodes': '',
         'order_masters': False,
         'job_cache': True,
Add master_tops to config.py
py
diff --git a/python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py b/python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py
index <HASH>..<HASH> 100644
--- a/python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py
+++ b/python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py
@@ -44,7 +44,7 @@ def build_fivetran_assets(

     .. code-block:: python

-        from dagster import AssetKey, build_assets_job
+        from dagster import AssetKey, repository, with_resources

         from dagster_fivetran import fivetran_resource
         from dagster_fivetran.assets import build_fivetran_assets
@@ -61,11 +61,12 @@ def build_fivetran_assets(
             table_names=["schema1.table1", "schema2.table2"],
         ])

-        my_fivetran_job = build_assets_job(
-            "my_fivetran_job",
-            assets=[fivetran_assets],
-            resource_defs={"fivetran": my_fivetran_resource}
-        )
+        @repository
+        def repo():
+            return with_resources(
+                fivetran_assets,
+                resource_defs={"fivetran": my_fivetran_resource},
+            )

     """
update build_fivetran_assets snippet to avoid build_assets_job (#<I>)
py
diff --git a/rows/utils/__init__.py b/rows/utils/__init__.py index <HASH>..<HASH> 100644 --- a/rows/utils/__init__.py +++ b/rows/utils/__init__.py @@ -791,8 +791,6 @@ class CsvLazyDictWriter: self._fobj = None self.writer_args = args self.writer_kwargs = kwargs - self.writer_kwargs["lineterminator"] = kwargs.get("lineterminator", "\n") - # TODO: check if it should be the same in other OSes def __enter__(self): return self
Do not force lineterminator on CsvLazyDictWriter. The CSV RFC defines that it should be CRLF, not LF
py
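For context on why forcing `"\n"` was removed: RFC 4180 specifies CRLF record delimiters, and Python's csv module already defaults to `lineterminator='\r\n'` in the excel dialect, so simply dropping the override restores conformant output. A quick check:

```python
import csv
import io

buf = io.StringIO()
writer = csv.writer(buf)  # excel dialect: lineterminator defaults to '\r\n'
writer.writerow(['a', 'b'])
assert buf.getvalue() == 'a,b\r\n'
```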
diff --git a/docker/utils/build.py b/docker/utils/build.py index <HASH>..<HASH> 100644 --- a/docker/utils/build.py +++ b/docker/utils/build.py @@ -91,6 +91,10 @@ def walk(root, patterns, default=True): matched = default if hit is None else hit sub = list(filter(lambda p: p[1], sub)) if os.path.isdir(cur): + # Entirely skip directories if there are no chance any subfile will + # be included. + if all(not p[0] for p in sub) and not matched: + continue children = False for r in (os.path.join(f, p) for p in walk(cur, sub, matched)): yield r
Skip entirely excluded directories when handling dockerignore. This is a pure optimization: do not recurse into directories when there is no chance any file will be included.
py
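The same subtree-skipping idea can be shown standalone with os.walk, whose documented contract is that mutating `dirnames` in place prevents descent into the removed directories (names below are illustrative, not docker-py's internals):

```python
import os

EXCLUDED_DIRS = {'.git', 'node_modules', '__pycache__'}  # illustrative set

def walk_pruned(root):
    """Yield file paths under root, skipping entire excluded subtrees."""
    for dirpath, dirnames, filenames in os.walk(root):
        # Pruning dirnames in place stops os.walk from recursing into
        # directories where no file could ever be included.
        dirnames[:] = [d for d in dirnames if d not in EXCLUDED_DIRS]
        for name in filenames:
            yield os.path.join(dirpath, name)
```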
diff --git a/fulfil_client/client.py b/fulfil_client/client.py index <HASH>..<HASH> 100755 --- a/fulfil_client/client.py +++ b/fulfil_client/client.py @@ -19,6 +19,7 @@ loads = partial(json.loads, object_hook=JSONDecoder()) class Error(Exception): def __init__(self, message, code): super(Exception, self).__init__(message) + self.message = message self.code = code
Add message attribute on ClientError. Python3 doesn't have a message attr on the base Exception
py
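To see why the attribute must be set explicitly: Python 3 dropped the implicit `message` attribute that Python 2's BaseException provided, so callers reading `exc.message` break unless the subclass restores it. A self-contained sketch of the pattern:

```python
class Error(Exception):
    def __init__(self, message, code):
        super().__init__(message)
        self.message = message  # restore the attr Python 3 no longer provides
        self.code = code

try:
    raise Error('boom', 500)
except Error as e:
    assert e.message == 'boom' and e.code == 500
```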
diff --git a/src/peltak/commands/impl/lint.py b/src/peltak/commands/impl/lint.py index <HASH>..<HASH> 100644 --- a/src/peltak/commands/impl/lint.py +++ b/src/peltak/commands/impl/lint.py @@ -21,6 +21,7 @@ def _lint_files(paths, include=None, exclude=None, pretend=False): :param paths: Iterable with each item being path that should be linted.. """ + allow_empty = True pylint_cfg_path = conf.get_path('lint.pylint_cfg', 'ops/tools/pylint.ini') pep8_cfg_path = conf.get_path('lint.pep8_cfg', 'ops/tools/pep8.ini') @@ -36,9 +37,13 @@ def _lint_files(paths, include=None, exclude=None, pretend=False): fs.filtered_walk(p, include, exclude) for p in paths )) - log.info("Files:") - for p in files: - log.info(" <0>{}", p) + if files: + log.info("Files:") + for p in files: + log.info(" <0>{}", p) + else: + log.err("No files found for linting, exiting...") + return allow_empty log.info("Collected <33>{} <32>files in <33>{}s".format( len(files), t.elapsed_s
Do not run linter when there are no files to lint
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -2,11 +2,14 @@ from metaknowledge.journalAbbreviations import abrevDBname from setuptools import setup, find_packages setup(name='metaknowledge', - version='0.2.0', - author="Reid McIlroy-Young", + version='0.3.0', + description = "A library for handling Web of science files", + author="Reid McIlroy-Young, John McLevey", author_email = "[email protected]", url="https://github.com/networks-lab/metaknowledge", + download_url = "https://github.com/networks-lab/isilib/archive/0.3.tar.gz", keywords= 'isi wos testing', + classifiers = [], install_requires= ['networkx'], #extras_require={'visualizer' : ['matplotlib']}, packages=find_packages(),#['metaknowledge', 'metaknowledge.journalAbbreviations'],
Changed stuff for PyPI test
py
diff --git a/numina/core/dataframe.py b/numina/core/dataframe.py index <HASH>..<HASH> 100644 --- a/numina/core/dataframe.py +++ b/numina/core/dataframe.py @@ -23,6 +23,8 @@ Basic Data Products import warnings +import pyfits + class DataFrame(object): def __init__(self, frame=None, filename=None, itype='UNKNOWN'): if frame is None and filename is None: @@ -31,6 +33,12 @@ class DataFrame(object): self.filename = filename self.itype = itype + def open(self): + if self.frame is None: + return pyfits.open(self.filename, memmap=True, mode='readonly') + else: + return self.frame + def __getstate__(self): if self.frame is None and self.filename is None: raise ValueError('only one in frame and filename can be None')
Add open method in DataFrame: either returns the in-memory HDUList or opens the file
py
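The memmap flag is what makes this open cheap: the HDU data stays on disk until it is actually sliced. A hedged standalone sketch of the same branch (modern installs expose pyfits's functionality as astropy.io.fits; the function name here is illustrative):

```python
from astropy.io import fits  # pyfits's functionality lives here nowadays

def open_frame(frame=None, filename=None):
    """Return an HDUList: the in-memory one if present, else a lazy mmap view."""
    if frame is None:
        return fits.open(filename, memmap=True, mode='readonly')
    return frame
```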
diff --git a/theanets/layers.py b/theanets/layers.py index <HASH>..<HASH> 100644 --- a/theanets/layers.py +++ b/theanets/layers.py @@ -426,7 +426,7 @@ class Layer(Base): value = call(expr) if isinstance(level, (int, float)): key = '<{}'.format(level) - value = TT.cast(100, FLOAT) * (expr < TT.cast(level, FLOAT)).mean() + value = (expr < TT.cast(level, FLOAT)).mean() monits.append(('{}.{}{}'.format(self.name, name, key), value)) return outs, monits, updates
Return threshold monitors as ratios, not percents.
py
diff --git a/tornado/ioloop.py b/tornado/ioloop.py index <HASH>..<HASH> 100644 --- a/tornado/ioloop.py +++ b/tornado/ioloop.py @@ -197,7 +197,7 @@ class IOLoop(object): try: event_pairs = self._impl.poll(poll_timeout) except Exception, e: - if e.errno == errno.EINTR: + if hasattr(e, 'errno') and e.errno == errno.EINTR: _log.warning("Interrupted system call", exc_info=1) continue else:
When using the select-based IOLoop, select can throw an error if passed too many sockets. This error does not have an errno
py
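The underlying issue is that exceptions from select.select are not guaranteed to carry an errno attribute, so the retry check has to probe for it before comparing. The guard can be expressed compactly with getattr (a sketch, not Tornado's code):

```python
import errno

def is_eintr(exc):
    """True if exc looks like an interrupted system call.

    Uses getattr because select-based loops can raise exceptions without
    an errno at all (e.g. when handed too many sockets).
    """
    return getattr(exc, 'errno', None) == errno.EINTR
```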
diff --git a/summary/__init__.py b/summary/__init__.py index <HASH>..<HASH> 100644 --- a/summary/__init__.py +++ b/summary/__init__.py @@ -205,14 +205,16 @@ class Summary(object): stream = response.iter_content(config.CHUNK_SIZE) # , decode_unicode=True response.stream = stream while True: - chunk = next(stream) - self._html += chunk - tag = find_tag(tag_name) - if tag: - return tag - if len(self._html) > config.HTML_MAX_BYTESIZE: - raise HTMLParseError('Maximum response size reached.') - response.consumed = True + try: + chunk = next(stream) + self._html += chunk + tag = find_tag(tag_name) + if tag: + return tag + if len(self._html) > config.HTML_MAX_BYTESIZE: + raise HTMLParseError('Maximum response size reached.') + except StopIteration: + response.consumed = True tag = find_tag(tag_name) return decode(tag, encoding) # decode here
Properly iterate stream response content.
py
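The crux of this fix is that requests' iter_content returns a generator, and calling next on an exhausted generator raises StopIteration, which the original `while True` loop let escape. The accumulate-until pattern, sketched independently of the Summary class:

```python
def read_until(stream, predicate):
    """Accumulate chunks from an iterator until predicate(buf) or exhaustion.

    stream is any iterator of byte chunks, e.g. response.iter_content(...).
    """
    buf = b''
    while True:
        try:
            buf += next(stream)
        except StopIteration:
            break  # body fully consumed; return whatever we gathered
        if predicate(buf):
            break
    return buf

# e.g. read_until(iter([b'<he', b'ad>', b'x']), lambda b: b'<head>' in b)
```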
diff --git a/stanfordnlp/models/common/hlstm.py b/stanfordnlp/models/common/hlstm.py index <HASH>..<HASH> 100644 --- a/stanfordnlp/models/common/hlstm.py +++ b/stanfordnlp/models/common/hlstm.py @@ -36,9 +36,9 @@ class HLSTMCell(nn.modules.rnn.RNNCellBase): # vanilla LSTM computation rec_input = torch.cat([input, hx[0]], 1) i = F.sigmoid(self.Wi(rec_input)) - f = F.sigmoid(self.Wi(rec_input)) - o = F.sigmoid(self.Wi(rec_input)) - g = F.tanh(self.Wi(rec_input)) + f = F.sigmoid(self.Wf(rec_input)) + o = F.sigmoid(self.Wo(rec_input)) + g = F.tanh(self.Wg(rec_input)) # highway gates gate = F.sigmoid(self.gate(torch.cat([c_l_minus_one, hx[1], input], 1)))
Fix the alternative Highway LSTM implementation
py
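The pre-fix code computed all four gates from `self.Wi`, a copy-paste slip that silently ties the gates together. For reference, a minimal PyTorch LSTM cell with one linear map per gate (an illustrative sketch, not the stanfordnlp implementation):

```python
import torch
import torch.nn as nn

class MinimalLSTMCell(nn.Module):
    """Vanilla LSTM cell with distinct parameters for each gate."""

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.Wi = nn.Linear(input_size + hidden_size, hidden_size)  # input gate
        self.Wf = nn.Linear(input_size + hidden_size, hidden_size)  # forget gate
        self.Wo = nn.Linear(input_size + hidden_size, hidden_size)  # output gate
        self.Wg = nn.Linear(input_size + hidden_size, hidden_size)  # candidate

    def forward(self, x, state):
        h, c = state
        rec = torch.cat([x, h], dim=1)
        i = torch.sigmoid(self.Wi(rec))
        f = torch.sigmoid(self.Wf(rec))
        o = torch.sigmoid(self.Wo(rec))
        g = torch.tanh(self.Wg(rec))
        c_new = f * c + i * g          # forget old state, admit new candidate
        h_new = o * torch.tanh(c_new)  # gated exposure of the cell state
        return h_new, c_new
```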
diff --git a/oauthlib/common.py b/oauthlib/common.py index <HASH>..<HASH> 100644 --- a/oauthlib/common.py +++ b/oauthlib/common.py @@ -85,6 +85,7 @@ def urldecode(query): if len(re.findall(invalid_hex, query)): raise ValueError('Invalid hex encoding in query string.') + query = query.decode('utf-8') if isinstance(query, str) else query # We want to allow queries such as "c2" whereas urlparse.parse_qsl # with the strict_parsing flag will not. params = urlparse.parse_qsl(query, keep_blank_values=True)
Decode query to unicode in urldecode. For completeness' sake.
py
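The normalization step matters because urlparse.parse_qsl expects text; handing it raw bytes yields byte keys and values or errors, depending on the Python version. The guard, reduced to its essence (Python 3 spelling shown; the original targets Python 2, where `str` is the byte type):

```python
def ensure_text(query, encoding='utf-8'):
    """Decode byte strings to text; pass text through untouched."""
    return query.decode(encoding) if isinstance(query, bytes) else query

assert ensure_text(b'a=1&b=2') == ensure_text('a=1&b=2') == 'a=1&b=2'
```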
diff --git a/tests/test_cls_log.py b/tests/test_cls_log.py index <HASH>..<HASH> 100644 --- a/tests/test_cls_log.py +++ b/tests/test_cls_log.py @@ -137,7 +137,10 @@ class LogTest(unittest.TestCase): self.mylog.record_command( 'logging a string') self.mylog.record_command( ['logging a list', 'more list items']) self.mylog.record_command( {'01':'logging a dictionary', '02':'more dict logging'}) - + + def test_20_utf8_characters(self): + self.mylog.record_source('BAD_CHARS', 'α') + if __name__ == '__main__': unittest.main()
test with unicode for cls_log
py
diff --git a/pushbullet_cli/app.py b/pushbullet_cli/app.py index <HASH>..<HASH> 100755 --- a/pushbullet_cli/app.py +++ b/pushbullet_cli/app.py @@ -83,6 +83,13 @@ def purge(): pb.delete_push(current_push['iden']) [email protected]("list-devices", help="List your devices") +def purge(): + pb = _get_pb() + for i, device in enumerate(pb.devices): + print("{0}. {1}".format(i, device.nickname)) + + @main.command("set-key", help="Set your API key.") def set_key(): key = getpass.getpass("Enter your security token from https://www.pushbullet.com/account: ")
Implemented list-devices
py
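One wrinkle in this commit: the new command's function is also named `purge`, shadowing the earlier `purge` at module level. Click registers each command by the explicit name string, so both commands still work, but a distinct function name would avoid confusion. A sketch of that variant (assuming the module's existing `main` group and `_get_pb` helper):

```python
@main.command("list-devices", help="List your devices")
def list_devices():  # distinct name; no longer shadows purge() above
    pb = _get_pb()
    for i, device in enumerate(pb.devices):
        print("{0}. {1}".format(i, device.nickname))
```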
diff --git a/docker/client.py b/docker/client.py index <HASH>..<HASH> 100644 --- a/docker/client.py +++ b/docker/client.py @@ -66,8 +66,8 @@ class Client(requests.Session): if version is None: self._version = DEFAULT_DOCKER_API_VERSION elif isinstance(version, six.string_types): - if version.lower() == "auto": - self._version = self.retrieve_server_version() + if version.lower() == 'auto': + self._version = self._retrieve_server_version() else: self._version = version else: @@ -77,13 +77,18 @@ class Client(requests.Session): ) ) - def retrieve_server_version(self): - response = self.version(api_version=False) + def _retrieve_server_version(self): try: - return response["ApiVersion"] + return self.version(api_version=False)["ApiVersion"] except KeyError: - raise ValueError("Invalid response from docker daemon: " - "key \"ApiVersion\" is missing.") + raise errors.DockerException( + 'Invalid response from docker daemon: key "ApiVersion"' + ' is missing.' + ) + except Exception as e: + raise errors.DockerException( + 'Error while fetching server API version: {0}'.format(e) + ) def _set_request_timeout(self, kwargs): """Prepare the kwargs for an HTTP request by inserting the timeout
Streamline exceptions for auto version; move retrieve_server_version into a protected method
py
diff --git a/filer/admin/folderadmin.py b/filer/admin/folderadmin.py index <HASH>..<HASH> 100644 --- a/filer/admin/folderadmin.py +++ b/filer/admin/folderadmin.py @@ -44,6 +44,7 @@ from filer.utils.filer_easy_thumbnails import FilerActionThumbnailer from filer.thumbnail_processors import normalize_subject_location from django.conf import settings as django_settings import os +import re import itertools @@ -288,8 +289,9 @@ class FolderAdmin(PrimitivePermissionAwareModelAdmin): if order_by is not None: order_by = order_by.split(',') order_by = [field for field in order_by - if field.replace('-', '') in self.order_by_file_fields] - file_qs = file_qs.order_by(*order_by) + if re.sub(r'^-', '', field) in self.order_by_file_fields] + if len(order_by) > 0: + file_qs = file_qs.order_by(*order_by) folder_children = [] folder_files = [] @@ -319,7 +321,7 @@ class FolderAdmin(PrimitivePermissionAwareModelAdmin): except: permissions = {} - if order_by is None: + if order_by is None or len(order_by) == 0: folder_files.sort() items = folder_children + folder_files
Check if order_by field is in whitelist with regexp
py
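The subtle improvement is `re.sub(r'^-', '', field)` versus `field.replace('-', '')`: only the leading descending marker is stripped for the whitelist check, so hyphen-containing terms validate correctly and the `-` survives into the actual order_by call. Standalone sketch:

```python
import re

ALLOWED = {'name', 'modified_at', 'owner'}  # illustrative whitelist

def safe_order_by(raw):
    """Keep only ordering terms whose bare field name is whitelisted."""
    return [t for t in raw.split(',') if re.sub(r'^-', '', t) in ALLOWED]

assert safe_order_by('-name,id,owner') == ['-name', 'owner']  # id rejected
```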
diff --git a/doberman/utils/auth.py b/doberman/utils/auth.py index <HASH>..<HASH> 100644 --- a/doberman/utils/auth.py +++ b/doberman/utils/auth.py @@ -110,6 +110,7 @@ class AccessAttempt(AccessIPAddress): return render_to_response( self.template_name, {'user_attempts': self.last_attempt_instance, - 'lockout_time': self.block_login_seconds + 'lockout_time': self.block_login_seconds, + 'ip_address': self.ip }, context_instance=RequestContext(self.request) ) \ No newline at end of file
adding ip_address to context of lockout template
py
diff --git a/tests/acceptance/test_dcos_package.py b/tests/acceptance/test_dcos_package.py index <HASH>..<HASH> 100644 --- a/tests/acceptance/test_dcos_package.py +++ b/tests/acceptance/test_dcos_package.py @@ -13,9 +13,18 @@ def test_uninstall_package_and_wait(): uninstall_package_and_wait('chronos') assert package_installed('chronos') == False +def task_cpu_predicate(service, task): + try: + response = get_service_task(service, task) + except Exception as e: + pass + + return (response is not None) and ('resources' in response) and ('cpus' in response['resources']) + + def test_install_package_with_json_options(): install_package_and_wait('chronos', None, 'big-chronos', None, {"chronos": {"cpus": 2}}) - wait_for_task_property('marathon', 'big-chronos', 'resources') + wait_for(lambda: task_cpu_predicate('marathon', 'big-chronos')) assert get_service_task('marathon', 'big-chronos')['resources']['cpus'] == 2 uninstall_package_and_wait('chronos')
adding new check for cpu to be in place before we continue testing.
py
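As committed, the predicate has a latent NameError: if get_service_task raises, the except swallows the exception, but `response` was never bound and the return line still references it. A safer form of the same polling predicate (a sketch; get_service_task is the test suite's existing helper):

```python
def task_cpu_predicate(service, task):
    """True once the task reports a cpus entry under resources."""
    try:
        response = get_service_task(service, task)  # existing shakedown helper
    except Exception:
        return False  # task not up yet; wait_for will poll again
    return bool(response) and 'cpus' in (response.get('resources') or {})
```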
diff --git a/great_expectations/data_context/datasource/databricks_generator.py b/great_expectations/data_context/datasource/databricks_generator.py index <HASH>..<HASH> 100644 --- a/great_expectations/data_context/datasource/databricks_generator.py +++ b/great_expectations/data_context/datasource/databricks_generator.py @@ -1,4 +1,4 @@ -import datetime +import time import logging from .batch_generator import BatchGenerator @@ -38,6 +38,6 @@ class DatabricksTableGenerator(BatchGenerator): return iter( { "query": query, - "timestamp": datetime.datetime.timestamp(datetime.now()) + "timestamp": time.time() } )
Update timestamp generation for databricks_generator
py
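The replaced expression was broken to begin with: given `import datetime`, the inner `datetime.now()` addresses the module rather than the class and raises AttributeError. `time.time()` is the simplest fix; the equivalent datetime spelling is shown for comparison:

```python
import datetime
import time

# The old call, datetime.datetime.timestamp(datetime.now()), raised
# AttributeError: with `import datetime`, `datetime.now()` hits the module.
ts_a = time.time()                          # what the commit switches to
ts_b = datetime.datetime.now().timestamp()  # equivalent datetime spelling
assert abs(ts_a - ts_b) < 5.0               # both are epoch seconds
```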