diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
---|---|---|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@ if __name__ == '__main__':
setup(name='AdvancedHTMLParser',
version='9.0.0',
- packages=['AdvancedHTMLParser'],
+ packages=['AdvancedHTMLParser', 'AdvancedHTMLParser.xpath'],
scripts=['formatHTML'],
author='Tim Savannah',
author_email='[email protected]',
|
Ensure xpath subpackage is installed properly
|
py
|
diff --git a/squad/api/rest.py b/squad/api/rest.py
index <HASH>..<HASH> 100644
--- a/squad/api/rest.py
+++ b/squad/api/rest.py
@@ -1202,7 +1202,7 @@ class TestSerializer(DynamicFieldsModelSerializer, serializers.HyperlinkedModelS
class TestViewSet(NestedViewSetMixin, ModelViewSet):
queryset = Test.objects.prefetch_related('metadata').all()
- project_lookup_key = 'test_run__build__project__in'
+ project_lookup_key = 'build__project__in'
serializer_class = TestSerializer
filterset_class = TestFilter
filter_class = filterset_class # TODO: remove when django-filters 1.x is not supported anymore
|
api: rest: remove testrun reference in project lookup for test viewset
|
py
|
diff --git a/src/ossos/core/ossos/__version__.py b/src/ossos/core/ossos/__version__.py
index <HASH>..<HASH> 100644
--- a/src/ossos/core/ossos/__version__.py
+++ b/src/ossos/core/ossos/__version__.py
@@ -1 +1 @@
-version = "0.7.0b"
+version = "0.7.1"
|
New Version pushed to PIP.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -15,11 +15,11 @@ def find_packages(path=".", prefix=""):
yield name
-with open("README.rst") as readme_file:
- readme = readme_file.read()
+with open("README.rst", 'rb') as readme_file:
+ readme = readme_file.read().decode('utf-8')
-with open("HISTORY.rst") as history_file:
- history = history_file.read()
+with open("HISTORY.rst", 'rb') as history_file:
+ history = history_file.read().decode('utf-8')
with open("requirements.txt") as requirements_file:
requirements = [
@@ -38,7 +38,7 @@ setup(
name="cumulusci",
version="2.2.0",
description="Build and release tools for Salesforce developers",
- long_description=readme + "\n\n" + history,
+ long_description=readme + u"\n\n" + history,
long_description_content_type="text/x-rst",
author="Salesforce.org",
author_email="[email protected]",
|
read long description as utf8
|
py
|
diff --git a/testsuite/test_tasks_datacite.py b/testsuite/test_tasks_datacite.py
index <HASH>..<HASH> 100644
--- a/testsuite/test_tasks_datacite.py
+++ b/testsuite/test_tasks_datacite.py
@@ -21,9 +21,12 @@ from __future__ import absolute_import
import httpretty
from mock import patch
-from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
+from invenio.testsuite import InvenioTestCase, make_test_suite, \
+ run_test_suite, nottest
+# FIXME externalize module
+@nottest
class DataCiteTasksTest(InvenioTestCase):
def setUp(self):
self.app.config['CFG_DATACITE_URL'] = 'https://mds.example.org/'
|
pidstore: testsuite disabling * Disables temporary the testsuite because they are using the old JsonAlchemy API.
|
py
|
diff --git a/axiom/attributes.py b/axiom/attributes.py
index <HASH>..<HASH> 100644
--- a/axiom/attributes.py
+++ b/axiom/attributes.py
@@ -101,7 +101,7 @@ class Comparable:
_likeOperators = ('LIKE', 'NOT LIKE')
def _like(self, op, *others):
if op.upper() not in self._likeOperators:
- raise ValueError, 'LIKE-style operators are: %s' % self._likeOperators
+ raise ValueError, 'LIKE-style operators are: %r' % self._likeOperators
if not others:
raise ValueError, 'Must pass at least one expression to _like'
|
use %r instead of %s when formatting the tuple of acceptable LIKE expressions
|
py
|
diff --git a/sanic/helpers.py b/sanic/helpers.py
index <HASH>..<HASH> 100644
--- a/sanic/helpers.py
+++ b/sanic/helpers.py
@@ -137,7 +137,7 @@ def remove_entity_headers(headers, allowed=("content-location", "expires")):
return headers
-def import_string(module_name):
+def import_string(module_name, package=None):
"""
import a module or class by string path.
@@ -148,7 +148,7 @@ def import_string(module_name):
"""
module, klass = module_name.rsplit(".", 1)
- module = import_module(module)
+ module = import_module(module, package=package)
obj = getattr(module, klass)
if ismodule(obj):
return obj
|
added param package to relative imports
|
py
|
diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py
index <HASH>..<HASH> 100644
--- a/src/transformers/tokenization_utils_base.py
+++ b/src/transformers/tokenization_utils_base.py
@@ -825,7 +825,13 @@ class SpecialTokensMixin:
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
current vocabulary).
- Using : obj:`add_special_tokens` will ensure your special tokens can be used in several ways:
+ .. Note::
+ When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of
+ the model so that its embedding matrix matches the tokenizer.
+
+ In order to do that, please use the :meth:`~transformers.PreTrainedModel.resize_token_embeddings` method.
+
+ Using :obj:`add_special_tokens` will ensure your special tokens can be used in several ways:
- Special tokens are carefully handled by the tokenizer (they are never split).
- You can easily refer to special tokens using tokenizer class attributes like :obj:`tokenizer.cls_token`. This
|
Adds a note to resize the token embedding matrix when adding special … (#<I>) * Adds a note to resize the token embedding matrix when adding special tokens * Remove superfluous space
|
py
|
diff --git a/visidata/vd.py b/visidata/vd.py
index <HASH>..<HASH> 100755
--- a/visidata/vd.py
+++ b/visidata/vd.py
@@ -1830,12 +1830,14 @@ def editText(scr, y, x, w, attr=curses.A_NORMAL, value='', fillchar=' ', unprint
dispval = clean(v)
else:
dispval = '*' * len(v)
- dispi = i
+ dispi = i # the onscreen offset within the field where v[i] is displayed
if len(dispval) < w:
dispval += fillchar*(w-len(dispval))
- elif i >= w:
+ elif i > w:
dispi = w-1
- dispval = dispval[i-w:]
+ dispval = dispval[i-w:i]
+ else:
+ dispval = dispval[:w]
scr.addstr(y, x, dispval, attr)
scr.move(y, x+dispi)
|
Fix #7; make editText stay within given width
|
py
|
diff --git a/pwnypack/shellcode/translate.py b/pwnypack/shellcode/translate.py
index <HASH>..<HASH> 100644
--- a/pwnypack/shellcode/translate.py
+++ b/pwnypack/shellcode/translate.py
@@ -128,6 +128,22 @@ def translate(env, func, *args, **kwargs):
else:
value[index] = new_value
+ elif op.name == 'INPLACE_ADD':
+ value = stack.pop()
+ reg = stack.pop()
+ if not isinstance(reg, Register):
+ raise TypeError('In-place addition is only supported on registers')
+ program.extend(env.reg_add(reg, value))
+ stack.append(reg)
+
+ elif op.name == 'INPLACE_SUBTRACT':
+ value = stack.pop()
+ reg = stack.pop()
+ if not isinstance(reg, Register):
+ raise TypeError('In-place subtraction is only supported on registers')
+ program.extend(env.reg_sub(reg, value))
+ stack.append(reg)
+
else:
raise RuntimeError('Unsupported opcode: %s' % op.name)
|
Support in-place add/subtract on registers.
|
py
|
diff --git a/mpd.py b/mpd.py
index <HASH>..<HASH> 100644
--- a/mpd.py
+++ b/mpd.py
@@ -1,6 +1,5 @@
#! /usr/bin/env python
-# TODO: implement argument checking/parsing (?)
# TODO: check for EOF when reading and benchmark it
# TODO: converter support
# TODO: global for parsing MPD_HOST/MPD_PORT
|
remove argument checking/parsing TODO item mpd already does a much better job of validating arguments than we ever could, so no point in us doing it too. Parsing arguments is also rather pointless, as all arguments are coerced to strings before escaping anyway.
|
py
|
diff --git a/pyfolio/utils.py b/pyfolio/utils.py
index <HASH>..<HASH> 100644
--- a/pyfolio/utils.py
+++ b/pyfolio/utils.py
@@ -47,10 +47,10 @@ ANNUALIZATION_FACTORS = {
MONTHLY: MONTHS_PER_YEAR
}
-DEPRECATION_WARNING = ("""Data loaders have been moved to empyrical and will
- be removed from pyfolio in a future release. Please use
- e.g. empyrical.utils.get_symbol_rets() instead of
- pyfolio.utils.get_symbol_rets()""")
+DEPRECATION_WARNING = ("Data loaders have been moved to empyrical and will "
+ "be removed from pyfolio in a future release. Please "
+ "use e.g. empyrical.utils.get_symbol_rets() instead "
+ "of pyfolio.utils.get_symbol_rets()")
def one_dec_places(x, pos):
|
DOC used " instead of """ for deprecation warning
|
py
|
diff --git a/tcex/bin/validate.py b/tcex/bin/validate.py
index <HASH>..<HASH> 100644
--- a/tcex/bin/validate.py
+++ b/tcex/bin/validate.py
@@ -17,6 +17,7 @@ from pydantic import ValidationError
from stdlib_list import stdlib_list
# first-party
+from tcex.app_config import Permutation
from tcex.app_config.install_json import InstallJson
from tcex.app_config.job_json import JobJson
from tcex.app_config.layout_json import LayoutJson
@@ -43,6 +44,7 @@ class Validate(BinABC):
def __init__(self, ignore_validation: bool) -> None:
"""Initialize Class properties."""
super().__init__()
+ self.permutations = Permutation()
self.ignore_validation = ignore_validation
# class properties
|
Add self.permutations to Validate class. (#<I>)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,7 @@ setup(
use_scm_version=True,
description=project_info.description,
+ long_description=project_info.description,
url=project_info.source_url,
@@ -29,4 +30,24 @@ setup(
],
python_requires='>=3',
+
+ # for pypi:
+
+ keywords='telegram bot api framework',
+
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+
+ 'Intended Audience :: Developers',
+ 'Topic :: Software Development :: Libraries :: Application Frameworks',
+
+ 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
+
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3 :: Only',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ ],
)
|
Add long_description (same as short description currently), keywords and classifiers to setup.py
|
py
|
diff --git a/salt/states/beacon.py b/salt/states/beacon.py
index <HASH>..<HASH> 100644
--- a/salt/states/beacon.py
+++ b/salt/states/beacon.py
@@ -31,6 +31,42 @@ Management of the Salt beacons
- 0.1
- 1.0
+ .. versionadded:: Neon
+
+ Beginning in the Neon release, multiple copies of a beacon can be configured
+ using the ``beacon_module`` parameter.
+
+ inotify_infs:
+ beacon.present:
+ - save: True
+ - enable: True
+ - files:
+ /etc/infs.conf:
+ mask:
+ - create
+ - delete
+ - modify
+ recurse: True
+ auto_add: True
+ - interval: 10
+ - beacon_module: inotify
+ - disable_during_state_run: True
+
+ inotify_ntp:
+ beacon.present:
+ - save: True
+ - enable: True
+ - files:
+ /etc/ntp.conf:
+ mask:
+ - create
+ - delete
+ - modify
+ recurse: True
+ auto_add: True
+ - interval: 10
+ - beacon_module: inotify
+ - disable_during_state_run: True
'''
from __future__ import absolute_import, print_function, unicode_literals
|
Porting PR #<I> to <I>
|
py
|
diff --git a/cli_ui/tests/test_cli_ui.py b/cli_ui/tests/test_cli_ui.py
index <HASH>..<HASH> 100644
--- a/cli_ui/tests/test_cli_ui.py
+++ b/cli_ui/tests/test_cli_ui.py
@@ -1,6 +1,5 @@
import datetime
import io
-import operator
import os
import re
from typing import Iterator
@@ -303,7 +302,7 @@ def test_ask_choice() -> None:
m.side_effect = ["nan", "5", "2"]
actual = cli_ui.ask_choice(
- "Select a fruit", choices=fruits, func_desc=operator.attrgetter("name")
+ "Select a fruit", choices=fruits, func_desc=func_desc
)
assert actual.name == "banana"
assert actual.price == 10
@@ -337,7 +336,7 @@ def test_select_choices() -> None:
with mock.patch("builtins.input") as m:
m.side_effect = ["nan", "5", "1, 2"]
actual = cli_ui.select_choices(
- "Select a fruit", choices=fruits, func_desc=operator.attrgetter("name")
+ "Select a fruit", choices=fruits, func_desc=func_desc
)
assert actual[0].name == "apple"
assert actual[0].price == 42
|
Fix two warnings found by SonarQube
|
py
|
diff --git a/src/pyctools/tools/setmetadata.py b/src/pyctools/tools/setmetadata.py
index <HASH>..<HASH> 100644
--- a/src/pyctools/tools/setmetadata.py
+++ b/src/pyctools/tools/setmetadata.py
@@ -32,7 +32,7 @@ Useful tags include: 'xlen', 'ylen' and 'fourcc'.
import argparse
import sys
-from ..core import Metadata
+from pyctools.core.metadata import Metadata
def main():
# get command args
|
One more pyctools.core import to change
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -202,7 +202,6 @@ cassandra = [
celery = [
'celery~=4.4.2',
'flower>=0.7.3, <1.0',
- 'tornado>=4.2.0, <6.0', # Dep of flower. Pin to a version that works on Py3.5.2
'vine~=1.3', # https://stackoverflow.com/questions/32757259/celery-no-module-named-five
]
cgroups = [
|
Unpin 'tornado' dep pulled in by flower (#<I>) 'tornado' version was pinned in <URL>
|
py
|
diff --git a/gglsbl/protocol.py b/gglsbl/protocol.py
index <HASH>..<HASH> 100644
--- a/gglsbl/protocol.py
+++ b/gglsbl/protocol.py
@@ -5,7 +5,7 @@ import struct
import time
from StringIO import StringIO
import random
-import os
+import posixpath
import re
import hashlib
import socket
@@ -334,7 +334,7 @@ class URL(object):
if not path:
path = '/'
has_trailing_slash = (path[-1] == '/')
- path = os.path.normpath(path).replace('//', '/')
+ path = posixpath.normpath(path).replace('//', '/')
if has_trailing_slash and path[-1] != '/':
path = path + '/'
user = url_parts.username
|
Fixed windows related bug around URL path normalization On a windows box, os.path uses windows path separators, which does not match URL path separators. The class posixpath is OS agnostic.
|
py
|
diff --git a/pandas/tseries/tests/test_timezones.py b/pandas/tseries/tests/test_timezones.py
index <HASH>..<HASH> 100644
--- a/pandas/tseries/tests/test_timezones.py
+++ b/pandas/tseries/tests/test_timezones.py
@@ -788,6 +788,19 @@ class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):
def localize(self, tz, x):
return x.replace(tzinfo=tz)
+ def test_utc_with_system_utc(self):
+ from pandas.tslib import maybe_get_tz
+
+ # from system utc to real utc
+ ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
+ # check that the time hasn't changed.
+ self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc()))
+
+ # from system utc to real utc
+ ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
+ # check that the time hasn't changed.
+ self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc()))
+
class TestTimeZones(tm.TestCase):
_multiprocess_can_split_ = True
|
Add test for conversion between dateutil.tz.tzutc() and dateutil.tz.gettz('UTC')
|
py
|
diff --git a/benchbuild/project.py b/benchbuild/project.py
index <HASH>..<HASH> 100644
--- a/benchbuild/project.py
+++ b/benchbuild/project.py
@@ -227,7 +227,7 @@ class Project(metaclass=ProjectDecorator):
source: Sources = attr.ib(
default=attr.Factory(lambda self: type(self).SOURCE, takes_self=True))
- primary_source: source.BaseSource = attr.ib(init=False)
+ primary_source: str = attr.ib(init=False)
@primary_source.default
def __default_primary_source(self) -> str:
|
project: annotate the correct type
|
py
|
diff --git a/xrpc/actor.py b/xrpc/actor.py
index <HASH>..<HASH> 100644
--- a/xrpc/actor.py
+++ b/xrpc/actor.py
@@ -318,7 +318,7 @@ class SignalRunner(LoggingActor, TerminatingHandler):
except TerminationException:
act.terminate('sigte')
except:
- logging.getLogger(__name__ + '.' + self.__class__.__name__).exception('%s %s', sig, frame)
+ logging.getLogger(__name__ + '.' + self.__class__.__name__).exception('%s', sig)
def signal_handler(self, sig, frame):
self.logger('hdlr.x').warning('%s %s', sig, frame)
|
Actor if signal causes an exception it's incorrectly handled
|
py
|
diff --git a/pycbc/__init__.py b/pycbc/__init__.py
index <HASH>..<HASH> 100644
--- a/pycbc/__init__.py
+++ b/pycbc/__init__.py
@@ -73,13 +73,6 @@ try:
except ImportError:
HAVE_CUDA=False
-# Check for MKL capability
-try:
- import pycbc.fft.mkl
- HAVE_MKL=True
-except ImportError:
- HAVE_MKL=False
-
# Check for openmp suppport, currently we pressume it exists, unless on
# platforms (mac) that are silly and don't use the standard gcc.
if sys.platform == 'darwin':
@@ -109,3 +102,11 @@ _cache_dir_path = os.path.join(_tmp_dir, _cache_dir_name)
try: os.makedirs(_cache_dir_path)
except OSError: pass
os.environ['PYTHONCOMPILED'] = _cache_dir_path
+
+# Check for MKL capability
+try:
+ import pycbc.fft.mkl
+ HAVE_MKL=True
+except ImportError as e:
+ print e
+ HAVE_MKL=False
|
move mkl check to end
|
py
|
diff --git a/scripts/gen.py b/scripts/gen.py
index <HASH>..<HASH> 100644
--- a/scripts/gen.py
+++ b/scripts/gen.py
@@ -39,7 +39,7 @@ import sys
# - Need to figure out the correct Timestamp type
copyright_header = """\
-# Copyright (c) 2012-2019, Mark Peek <[email protected]>
+# Copyright (c) 2012-2020, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
@@ -523,6 +523,6 @@ if __name__ == '__main__':
f.add_property(class_name, properties)
if args.name:
- r.output_file(args.name)
+ r.output_file(args.name.lower())
else:
r.output_files()
|
Tweaks to the gen.py script
|
py
|
diff --git a/lib/svtplay_dl/service/mtvservices.py b/lib/svtplay_dl/service/mtvservices.py
index <HASH>..<HASH> 100644
--- a/lib/svtplay_dl/service/mtvservices.py
+++ b/lib/svtplay_dl/service/mtvservices.py
@@ -1,10 +1,8 @@
from __future__ import absolute_import
import sys
import re
-import json
import xml.etree.ElementTree as ET
-from svtplay_dl.utils import get_http_data
from svtplay_dl.service import Service
from svtplay_dl.utils import get_http_data, select_quality
from svtplay_dl.fetcher.http import download_http
@@ -26,7 +24,6 @@ class Mtvservices(Service):
start = data.index("<?xml version=")
data = data[start:]
xml = ET.XML(data)
- dada = xml.find("package")
ss = xml.find("video").find("item")
if sys.version_info < (2, 7):
sa = list(ss.getiterator("rendition"))
|
mtvservices: minor pylint cleanup
|
py
|
diff --git a/trakt/version.py b/trakt/version.py
index <HASH>..<HASH> 100644
--- a/trakt/version.py
+++ b/trakt/version.py
@@ -1 +1 @@
-__version__ = '2.7.0'
+__version__ = '2.7.1'
|
Bumped version to <I>
|
py
|
diff --git a/torchvision/datasets/lsun.py b/torchvision/datasets/lsun.py
index <HASH>..<HASH> 100644
--- a/torchvision/datasets/lsun.py
+++ b/torchvision/datasets/lsun.py
@@ -5,7 +5,11 @@ import os.path
import six
import string
import sys
-from collections import Iterable
+
+if sys.version_info < (3, 3):
+ from collections import Iterable
+else:
+ from collections.abc import Iterable
if sys.version_info[0] == 2:
import cPickle as pickle
|
Fix DeprecationWarning for collections.Iterable import (#<I>) * Fix DeprecationWarning for collections.Iterable import * Simplify version comparison
|
py
|
diff --git a/parse_this/__init__.py b/parse_this/__init__.py
index <HASH>..<HASH> 100644
--- a/parse_this/__init__.py
+++ b/parse_this/__init__.py
@@ -51,7 +51,7 @@ def parse_this(func, types, args=None, delimiter_chars=":"):
return _call(func, func_args, arguments)
-class create_parser(object):
+class MethodParser(object):
"""Creates an argument parser for the decorated function.
Note:
@@ -106,7 +106,10 @@ class create_parser(object):
return decorated
-class parse_class(object):
+create_parser = MethodParser
+
+
+class ClassParser(object):
"""Allows to create a global argument parser for a class along with
subparsers with each if its properly decorated methods."""
@@ -251,3 +254,6 @@ class parse_class(object):
return _call_method_from_namespace(instance, method_name, namespace)
return inner_call
+
+
+parse_class = ClassParser
|
Use file level variable to export so we can rename classes properly
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,7 @@ setup(
version='1.10.4',
description='Strong lens modeling package.',
long_description=desc,
- author='Simon Birrer',
+ author='lenstronomy developers',
author_email='[email protected]',
url='https://github.com/lenstronomy/lenstronomy',
download_url='https://github.com/sibirrer/lenstronomy/archive/1.10.4.tar.gz',
@@ -76,7 +76,7 @@ setup(
include_package_data=True,
# setup_requires=requires,
install_requires=requires,
- license='MIT',
+ license='BSD-3',
zip_safe=False,
keywords='lenstronomy',
classifiers=[
|
changed authors in setup.py and license description
|
py
|
diff --git a/pyradio/radio.py b/pyradio/radio.py
index <HASH>..<HASH> 100644
--- a/pyradio/radio.py
+++ b/pyradio/radio.py
@@ -189,6 +189,7 @@ class PyRadio(object):
self.setStation(-1)
else:
jumpto=min(int(self.jumpnr)-1,len(self.stations)-1)
+ jumpto=max(0,jumpto)
self.setStation(jumpto)
self.jumpnr = ""
self.refreshBody()
|
Fix jumping if number is 0
|
py
|
diff --git a/source/rafcon/gui/mygaphas/items/state.py b/source/rafcon/gui/mygaphas/items/state.py
index <HASH>..<HASH> 100644
--- a/source/rafcon/gui/mygaphas/items/state.py
+++ b/source/rafcon/gui/mygaphas/items/state.py
@@ -451,13 +451,9 @@ class StateView(Element):
scoped_variable_v.draw(context, self)
if isinstance(self.model, LibraryStateModel) and not self.moving:
- max_width = width / 2.
- max_height = height / 2.
self._draw_symbol(context, constants.SIGN_LIB, gui_config.gtk_colors['STATE_NAME'], 0.75)
if self.moving:
- max_width = width - 2 * border_width
- max_height = height - 2 * border_width
self._draw_symbol(context, constants.SIGN_ARROW, gui_config.gtk_colors['STATE_NAME'])
def _draw_symbol(self, context, symbol, color, transparency=0.):
|
refactor(gaphas): remove obsolete lines
|
py
|
diff --git a/fusesoc/main.py b/fusesoc/main.py
index <HASH>..<HASH> 100644
--- a/fusesoc/main.py
+++ b/fusesoc/main.py
@@ -210,13 +210,16 @@ def list_cores(cm, args):
def gen_list(cm, args):
cores = cm.get_generators()
- print("\nAvailable generators:\n")
- maxlen = max(map(len,cores.keys()))
- print('Core'.ljust(maxlen) + ' Generator')
- print("="*(maxlen+12))
- for core in sorted(cores.keys()):
- for generator_name, generator_data in cores[core].items():
- print('{} : {} : {}'.format(core.ljust(maxlen), generator_name, generator_data.description or "<No description>"))
+ if not cores:
+ print("\nNo available generators\n")
+ else:
+ print("\nAvailable generators:\n")
+ maxlen = max(map(len,cores.keys()))
+ print('Core'.ljust(maxlen) + ' Generator')
+ print("="*(maxlen+12))
+ for core in sorted(cores.keys()):
+ for generator_name, generator_data in cores[core].items():
+ print('{} : {} : {}'.format(core.ljust(maxlen), generator_name, generator_data.description or "<No description>"))
def gen_show(cm, args):
cores = cm.get_generators()
|
Display sensible message for 'fusesoc gen list' when there are no generators.
|
py
|
diff --git a/zipline/assets/assets.py b/zipline/assets/assets.py
index <HASH>..<HASH> 100644
--- a/zipline/assets/assets.py
+++ b/zipline/assets/assets.py
@@ -146,12 +146,9 @@ class AssetFinder(object):
if asset is not None:
self._asset_cache[sid] = asset
- if asset is not None:
+ if (asset is not None) or default_none:
return asset
- elif default_none:
- return None
- else:
- raise SidNotFound(sid=sid)
+ raise SidNotFound(sid=sid)
def retrieve_all(self, sids, default_none=False):
return [self.retrieve_asset(sid, default_none) for sid in sids]
|
MAINT: Simplify conditional.
|
py
|
diff --git a/src/collectors/elasticsearch/elasticsearch.py b/src/collectors/elasticsearch/elasticsearch.py
index <HASH>..<HASH> 100644
--- a/src/collectors/elasticsearch/elasticsearch.py
+++ b/src/collectors/elasticsearch/elasticsearch.py
@@ -237,13 +237,18 @@ class ElasticSearchCollector(diamond.collector.Collector):
metrics['jvm.threads.count'] = jvm['threads']['count']
gc = jvm['gc']
- metrics['jvm.gc.collection.count'] = gc['collection_count']
- metrics['jvm.gc.collection.time'] = gc['collection_time_in_millis']
+ collection_count = 0
+ collection_time_in_millis = 0
for collector, d in gc['collectors'].iteritems():
metrics['jvm.gc.collection.%s.count' % collector] = d[
'collection_count']
+ collection_count += d['collection_count']
metrics['jvm.gc.collection.%s.time' % collector] = d[
'collection_time_in_millis']
+ collection_time_in_millis += d['collection_time_in_millis']
+ # calculate the totals, as they're absent in elasticsearch > 0.90.10
+ metrics['jvm.gc.collection.count'] = collection_count
+ metrics['jvm.gc.collection.time'] = collection_time_in_millis
#
# thread_pool
|
Calculate the aggregated gc stats as they're absent in elastisearch > <I>.
|
py
|
diff --git a/sos/plugins/ovirt.py b/sos/plugins/ovirt.py
index <HASH>..<HASH> 100644
--- a/sos/plugins/ovirt.py
+++ b/sos/plugins/ovirt.py
@@ -38,12 +38,10 @@ class Ovirt(Plugin, RedHatPlugin):
DB_PASS_FILES = re.compile(
flags=re.VERBOSE,
- pattern=r"""
- ^/etc/
+ pattern=r"""^/etc/
(rhevm|ovirt-engine|ovirt-engine-dwh)/
(engine.conf|ovirt-engine-dwhd.conf)
- (\.d/.+.conf.*?)?$
- """
+ (\.d/.+.conf.*?)?$"""
)
DEFAULT_SENSITIVE_KEYS = (
|
[ovirt] fix regex formatting LGTM warns about unmatchable caret and dollar symbols in ovirt's DB_PASS_FILES since they are surrounded by leading and trailing whitespace. Resolves: #<I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -35,9 +35,9 @@ CLASSIFIERS = [
'Programming Language :: Python :: 2.7'
]
-SUMMARY = "Digital Ocean API v2 with SSH integration"
+DESCRIPTION = "Digital Ocean API v2 with SSH integration"
-DESCRIPTION = """
+LONG_DESCRIPTION = """
********************************
poseidon: tame the digital ocean
********************************
@@ -95,6 +95,7 @@ Deploy a new Flask app from github
ssh.pip_r('requirements.txt')
ssh.nohup('python app.py') # DNS takes a while to propagate
print ssh.ps()
+
"""
setup(
@@ -107,6 +108,7 @@ setup(
keywords=['digitalocean', 'digital ocean', 'digital', 'ocean', 'api', 'v2',
'web programming', 'cloud', 'digitalocean api v2'],
description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
download_url=DOWNLOAD_URL,
package_data={'': ['requirements.txt']},
|
CLN: fix long description and description in setup
|
py
|
diff --git a/troposphere/codedeploy.py b/troposphere/codedeploy.py
index <HASH>..<HASH> 100644
--- a/troposphere/codedeploy.py
+++ b/troposphere/codedeploy.py
@@ -91,6 +91,7 @@ class DeploymentGroup(AWSObject):
'AutoScalingGroups': ([basestring], False),
'Deployment': (Deployment, False),
'DeploymentConfigName': (basestring, False),
+ 'DeploymentGroupName': (basestring, False),
'Ec2TagFilters': ([Ec2TagFilters], False),
'OnPremisesInstanceTagFilters': (OnPremisesInstanceTagFilters, False),
'ServiceRoleArn': (basestring, True),
|
DeploymentGroupName property missing DeploymentGroupName property missing from the DeploymentGroup Resource
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ def read(fname):
setup(
name='gittar',
- version='0.3dev',
+ version='0.5dev',
description='The inverse of git archive. Adds a new commit from an archive'
'or the filesystem.',
long_description=read('README.rst'),
|
Next version will be <I>.
|
py
|
diff --git a/jnrbase/cmdline.py b/jnrbase/cmdline.py
index <HASH>..<HASH> 100644
--- a/jnrbase/cmdline.py
+++ b/jnrbase/cmdline.py
@@ -149,8 +149,7 @@ def gen_text(env: TextIOBase, package: str, tmpl: str):
else:
env_args = {}
jinja_env = template.setup(package)
- tmpl = jinja_env.get_template(tmpl)
- echo(tmpl.render(**env_args))
+ echo(jinja_env.get_template(tmpl).render(**env_args))
@cli.command(help=_('Time the output of a command'))
|
[QA] Don't shadow args to ease type checking
|
py
|
diff --git a/lambdas/es/indexer/document_queue.py b/lambdas/es/indexer/document_queue.py
index <HASH>..<HASH> 100644
--- a/lambdas/es/indexer/document_queue.py
+++ b/lambdas/es/indexer/document_queue.py
@@ -24,7 +24,7 @@ CONTENT_INDEX_EXTS = [
]
# See https://amzn.to/2xJpngN for chunk size as a function of container size
-CHUNK_LIMIT_BYTES = 20_000_000
+CHUNK_LIMIT_BYTES = 9_500_000
ELASTIC_TIMEOUT = 30
MAX_BACKOFF = 360 #seconds
MAX_RETRY = 4 # prevent long-running lambdas due to malformed calls
|
Don't send HTTP requests that are too large (#<I>)
|
py
|
diff --git a/mongo_connector/connector.py b/mongo_connector/connector.py
index <HASH>..<HASH> 100644
--- a/mongo_connector/connector.py
+++ b/mongo_connector/connector.py
@@ -745,12 +745,13 @@ def get_config_options():
dm['targetURL'] = None
if not dm.get('uniqueKey'):
dm['uniqueKey'] = constants.DEFAULT_UNIQUE_KEY
- if not dm.get('autoCommitInterval'):
+ if dm.get('autoCommitInterval') is None:
dm['autoCommitInterval'] = constants.DEFAULT_COMMIT_INTERVAL
if not dm.get('args'):
dm['args'] = {}
- if dm['autoCommitInterval'] and dm['autoCommitInterval'] < 0:
+ aci = dm['autoCommitInterval']
+ if aci is not None and aci < 0:
raise errors.InvalidConfiguration(
"autoCommitInterval must be non-negative.")
|
Fix autoCommitInterval when set to 0.
|
py
|
diff --git a/tensorboardX/proto/__init__.py b/tensorboardX/proto/__init__.py
index <HASH>..<HASH> 100644
--- a/tensorboardX/proto/__init__.py
+++ b/tensorboardX/proto/__init__.py
@@ -1,4 +0,0 @@
-try:
- from . import event_pb2
-except ImportError:
- raise RuntimeError('Run "./compile.sh" to compile protobuf bindings.')
|
Remove import check from proto/ (#<I>)
|
py
|
diff --git a/docker_scripts/squash.py b/docker_scripts/squash.py
index <HASH>..<HASH> 100644
--- a/docker_scripts/squash.py
+++ b/docker_scripts/squash.py
@@ -436,3 +436,5 @@ class Squash:
self.log.info("Finished, image registered as '%s:%s'" %
(image_name, image_tag))
+
+ return new_image_id
|
Return new image ID in the run method Fixes #<I>
|
py
|
diff --git a/nomenclate/version.py b/nomenclate/version.py
index <HASH>..<HASH> 100644
--- a/nomenclate/version.py
+++ b/nomenclate/version.py
@@ -1 +1 @@
-__version__ = '2.1.6'
+__version__ = '2.1.7'
|
versioned up to <I>
|
py
|
diff --git a/lib/websearch_webinterface.py b/lib/websearch_webinterface.py
index <HASH>..<HASH> 100644
--- a/lib/websearch_webinterface.py
+++ b/lib/websearch_webinterface.py
@@ -741,7 +741,7 @@ class WebInterfaceSearchInterfacePages(WebInterfaceDirectory):
target = '/collection/' + quote(c)
target += make_canonical_urlargd(argd, legacy_collection_default_urlargd)
- return redirect_to_url(req, target, apache.HTTP_MOVED_PERMANENTLY)
+ return redirect_to_url(req, target)
def display_collection(req, c, as, verbose, ln):
"""Display search interface page for collection c by looking
|
Removed HTTP_MOVED_PERMANENTLY for security until proxy bugs are resolved.
|
py
|
diff --git a/etesync/api.py b/etesync/api.py
index <HASH>..<HASH> 100644
--- a/etesync/api.py
+++ b/etesync/api.py
@@ -49,9 +49,11 @@ class EteSync:
self._set_db(database)
- def _init_db_tables(self, database):
+ def _init_db_tables(self, database, additional_tables=None):
database.create_tables([cache.Config, pim.Content, cache.User, cache.JournalEntity,
cache.EntryEntity, cache.UserInfo], safe=True)
+ if additional_tables:
+ database.create_tables(additional_tables, safe=True)
db_version = cache.Config.get_or_none()
if db_version is None:
|
Support extending the database with additional tables.
|
py
|
diff --git a/moderator/admin.py b/moderator/admin.py
index <HASH>..<HASH> 100644
--- a/moderator/admin.py
+++ b/moderator/admin.py
@@ -200,7 +200,10 @@ class CommentAdmin(DjangoCommentsAdmin):
def content(self, obj, *args, **kwargs):
content_type = obj.content_type
- content = self.ct_map[content_type][int(obj.object_pk)]
+ if not int(obj.object_pk) in self.ct_map[content_type]:
+ content = obj
+ else:
+ content = self.ct_map[content_type][int(obj.object_pk)]
url = reverse('admin:%s_%s_moderate' % (
content_type.app_label,
content_type.model
|
fix error when ct_map cache doesn't have the object
|
py
|
diff --git a/sqliteschema/_table_extractor.py b/sqliteschema/_table_extractor.py
index <HASH>..<HASH> 100644
--- a/sqliteschema/_table_extractor.py
+++ b/sqliteschema/_table_extractor.py
@@ -33,7 +33,7 @@ class SqliteSchemaTableExtractorV0(SqliteSchemaTextExtractorV0):
return 0
@property
- def _table_clasts(self):
+ def _table_writer_class(self):
return ptw.RstSimpleTableWriter
@property
@@ -78,7 +78,7 @@ class SqliteSchemaTableExtractorV0(SqliteSchemaTextExtractorV0):
values.get(header) for header in self._header_list
])
- writer = self._table_clasts()
+ writer = self._table_writer_class()
writer.stream = six.StringIO()
writer.table_name = table_name
writer.header_list = self._header_list
@@ -97,7 +97,7 @@ class SqliteSchemaTableExtractorV1(SqliteSchemaTableExtractorV0):
return 1
@property
- def _table_clasts(self):
+ def _table_writer_class(self):
return ptw.RstGridTableWriter
@property
|
Rename a property to more precisely represent its functionality
|
py
|
diff --git a/rollbar/contrib/fastapi/utils.py b/rollbar/contrib/fastapi/utils.py
index <HASH>..<HASH> 100644
--- a/rollbar/contrib/fastapi/utils.py
+++ b/rollbar/contrib/fastapi/utils.py
@@ -45,15 +45,25 @@ def get_installed_middlewares(app):
ASGIReporterMiddleware,
)
- if not hasattr(app, 'user_middleware'):
+ if hasattr(app, 'user_middleware'): # FastAPI v0.51.0+
+ return [
+ middleware.cls
+ for middleware in app.user_middleware
+ if middleware.cls in candidates
+ ]
+ elif hasattr(app, 'error_middleware'):
+ middleware = app.error_middleware
+ middlewares = []
+
+ while hasattr(middleware, 'app'):
+ if isinstance(middleware, candidates):
+ middlewares.append(middleware)
+ middleware = middleware.app
+
+ return [middleware.__class__ for middleware in middlewares]
+ else:
return []
- return [
- middleware.cls
- for middleware in app.user_middleware
- if middleware.cls in candidates
- ]
-
def has_bare_routing(app_or_router):
expected_app_routes = 4
|
Fix get_installed_middlewares() for older FastAPI versions
|
py
|
diff --git a/src/ai/backend/common/__init__.py b/src/ai/backend/common/__init__.py
index <HASH>..<HASH> 100644
--- a/src/ai/backend/common/__init__.py
+++ b/src/ai/backend/common/__init__.py
@@ -1 +1 @@
-__version__ = '22.03.0b3'
+__version__ = '22.03.0b4'
|
release: <I>b4
|
py
|
diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py
index <HASH>..<HASH> 100644
--- a/salt/fileserver/__init__.py
+++ b/salt/fileserver/__init__.py
@@ -532,3 +532,26 @@ class Fileserver(object):
(x, y) for x, y in ret.items() if x.startswith(prefix)
])
return ret
+
+
+class FSChan(object):
+ '''
+ A class that mimics the transport channels allowing for local access
+ to the fileserver class structure
+ '''
+ def __init__(self, opts, **kwargs):
+ self.opts = opts
+ self.kwargs = kwargs
+ self.fs = Fileserver(self.opts)
+
+ def send(self, load, tries=None, timeout=None):
+ '''
+ Emulate the channel send method, the tries and timeout are not used
+ '''
+ if 'cmd' not in load:
+ log.error('Malformed request: {0}'.format(load))
+ return {}
+ if not hasattr(self.fs, load['cmd']):
+ log.error('Malformed request: {0}'.format(load))
+ return {}
+ return getattr(self.fs, load['cmd'])(load)
|
Add initial FSChan object This object will allow us to replace a channel in the file client with a local channel that just uses the file server class
|
py
|
diff --git a/IPython/html/widgets/widget.py b/IPython/html/widgets/widget.py
index <HASH>..<HASH> 100644
--- a/IPython/html/widgets/widget.py
+++ b/IPython/html/widgets/widget.py
@@ -175,7 +175,8 @@ class Widget(LoggingConfigurable):
def _comm_changed(self, name, new):
"""Called when the comm is changed."""
- self.comm = new
+ if new is None:
+ return
self._model_id = self.model_id
self.comm.on_msg(self._handle_msg)
|
handle setting Widget.comm = None, which is done in `Widget.close`; fixes loads of warnings in widget test output, caused every time a widget is closed.
|
py
|
diff --git a/src/saml2/client.py b/src/saml2/client.py
index <HASH>..<HASH> 100644
--- a/src/saml2/client.py
+++ b/src/saml2/client.py
@@ -79,7 +79,7 @@ class Saml2Client(object):
self.users = Population(identity_cache)
# for server state storage
- if not state_cache:
+ if state_cache is None:
self.state = {} # in memory storage
else:
self.state = state_cache
@@ -717,7 +717,7 @@ class Saml2Client(object):
(headers, _body) = http_redirect_message(str(response),
destination,
- rstate)
+ rstate, 'SAMLResponse')
return headers, success
@@ -732,7 +732,7 @@ class Saml2Client(object):
"""
if binding == BINDING_HTTP_REDIRECT:
- return http_redirect_logout_request(request, subject_id, log)
+ return self.http_redirect_logout_request(request, subject_id, log)
def make_logout_response(self, idp_entity_id, request_id,
status_code, binding=BINDING_HTTP_REDIRECT):
|
Bugs in logout request (caught by Lorenzo)
|
py
|
diff --git a/menuconfig.py b/menuconfig.py
index <HASH>..<HASH> 100755
--- a/menuconfig.py
+++ b/menuconfig.py
@@ -2028,10 +2028,10 @@ def _menu_path_info(node):
path = ""
- menu = node.parent
- while menu is not _kconf.top_node:
- path = " -> " + menu.prompt[0] + path
- menu = menu.parent
+ node = _parent_menu(node)
+ while node is not _kconf.top_node:
+ path = " -> " + node.prompt[0] + path
+ node = _parent_menu(node)
return "(top menu)" + path
|
menuconfig: Don't show implicit menus in symbol info Only show "real" (non-indented) menus, like in the menu path at the top.
|
py
|
diff --git a/mysql/toolkit/components/query.py b/mysql/toolkit/components/query.py
index <HASH>..<HASH> 100644
--- a/mysql/toolkit/components/query.py
+++ b/mysql/toolkit/components/query.py
@@ -2,7 +2,7 @@ from differentiate import diff
from mysql.toolkit.utils import get_col_val_str, join_cols, wrap
-MAX_ROWS_PER_QUERY = 100000
+MAX_ROWS_PER_QUERY = 50000
class Query:
@@ -112,11 +112,13 @@ class Query:
if len(values) > limit:
while len(values) > 0:
vals = [values.pop(0) for i in range(0, min(limit, len(values)))]
- self._cursor.executemany(statement, vals)
+ self._cursor.executemany(statement, values)
+ self._commit()
else:
# Execute statement
self._cursor.executemany(statement, values)
+ self._commit()
self._printer('\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')
def update(self, table, columns, values, where):
|
Decreased MAX_ROWS_PER_QUERY to <I>,<I> and added commit statement after execute
|
py
|
diff --git a/doc/conf.py b/doc/conf.py
index <HASH>..<HASH> 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -219,3 +219,7 @@ sphinx_gallery_conf = {
"bokeh": "http://bokeh.pydata.org/en/latest/",
},
}
+
+
+def setup(app):
+ app.add_javascript('https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js')
\ No newline at end of file
|
Try messing with conf to get plotly plots to display
|
py
|
diff --git a/util/gen-go-studio.py b/util/gen-go-studio.py
index <HASH>..<HASH> 100644
--- a/util/gen-go-studio.py
+++ b/util/gen-go-studio.py
@@ -24,7 +24,7 @@ def after_install(options, home_dir):
if not os.path.exists(etc):
os.makedirs(etc)
- subprocess.call([join(bin_dir, 'easy_install'), 'Studio'])
+ subprocess.call([join(bin_dir, 'easy_install'), 'Studio==%s'])
"""
|
allows generating a go-studio script for a specific Studio version
|
py
|
diff --git a/openstack_dashboard/dashboards/project/instances/tests.py b/openstack_dashboard/dashboards/project/instances/tests.py
index <HASH>..<HASH> 100644
--- a/openstack_dashboard/dashboards/project/instances/tests.py
+++ b/openstack_dashboard/dashboards/project/instances/tests.py
@@ -918,8 +918,6 @@ class InstanceTests(helpers.TestCase):
self.assertItemsEqual(res.context['instance'].volumes, volumes)
- self.assertItemsEqual(res.context['instance'].volumes, volumes)
-
def test_instance_details_volume_sorting(self):
server = self.servers.first()
volumes = self.volumes.list()[1:3]
@@ -949,8 +947,7 @@ class InstanceTests(helpers.TestCase):
1)
self.assertContains(res, "<dd><!--</dd>", 1)
self.assertContains(res, "<dt>empty</dt>", 1)
- # TODO(david-lyle): uncomment when fixed with Django 1.6
- # self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
+ self.assertContains(res, "<dd><em>N/A</em></dd>", 1)
def test_instance_details_fault(self):
server = self.servers.first()
|
Remove an unnecessary assert call in InstanceTests. Also uncomment an old test case since Django is now <<I>, >=<I>. Change-Id: I<I>d<I>f<I>b8f<I>c<I>ab3f<I>c<I>d<I>f<I>b<I>ad6
|
py
|
diff --git a/src/jukeboxmaya/addons/scenerelease/scenerelease.py b/src/jukeboxmaya/addons/scenerelease/scenerelease.py
index <HASH>..<HASH> 100644
--- a/src/jukeboxmaya/addons/scenerelease/scenerelease.py
+++ b/src/jukeboxmaya/addons/scenerelease/scenerelease.py
@@ -196,5 +196,6 @@ class MayaSceneRelease(JB_MayaStandaloneGuiPlugin):
except models.TaskFile.DoesNotExist:
pass
else:
- self.rw.browser.set_selection(f)
+ if f.releasetype == 'work':
+ self.rw.browser.set_selection(f)
self.rw.show()
|
Fix release window set selection when releasetype is not 'work'
|
py
|
diff --git a/slither/solc_parsing/solidity_types/type_parsing.py b/slither/solc_parsing/solidity_types/type_parsing.py
index <HASH>..<HASH> 100644
--- a/slither/solc_parsing/solidity_types/type_parsing.py
+++ b/slither/solc_parsing/solidity_types/type_parsing.py
@@ -160,7 +160,10 @@ def parse_type(t, caller_context):
elif t[key] == 'UserDefinedTypeName':
if is_compact_ast:
return _find_from_type_name(t['typeDescriptions']['typeString'], contract, contracts, structures, enums)
- return _find_from_type_name(t['attributes']['type'], contract, contracts, structures, enums)
+
+ # Determine if we have a type node (otherwise we use the name node, as some older solc did not have 'type').
+ type_name_key = 'type' if 'type' in t['attributes'] else key
+ return _find_from_type_name(t['attributes'][type_name_key], contract, contracts, structures, enums)
elif t[key] == 'ArrayTypeName':
length = None
|
Fixed an issue where 'type' node would not exist on old solc in legacy-ast.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ setup(
package_dir={'bottle_auth': 'bottle_auth'},
- install_requires=['webob', 'bottle-mongo'],
+ install_requires=['webob', 'bottle-mongo', 'bottle-beaker'],
classifiers=[
'Environment :: Web Environment',
'Environment :: Plugins',
|
add bottle beaker plugin on requires file
|
py
|
diff --git a/master/buildbot/status/github.py b/master/buildbot/status/github.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/status/github.py
+++ b/master/buildbot/status/github.py
@@ -18,9 +18,13 @@ import datetime
from twisted.internet import defer
from twisted.python import log
-from txgithub.api import GithubApi as GitHubAPI
+try:
+ from txgithub.api import GithubApi as GitHubAPI
+except ImportError:
+ GitHubAPI = None
from zope.interface import implements
+from buildbot import config
from buildbot.interfaces import IStatusReceiver
from buildbot.process.properties import Interpolate
from buildbot.status.base import StatusReceiverMultiService
@@ -43,6 +47,9 @@ class GitHubStatus(StatusReceiverMultiService):
"""
Token for GitHub API.
"""
+ if not GitHubAPI:
+ config.error('GitHubStatus requires txgithub package installed')
+
StatusReceiverMultiService.__init__(self)
if not sha:
|
make bulidbot.status.github not bail out on missing txgithub
|
py
|
diff --git a/tests/test_boards.py b/tests/test_boards.py
index <HASH>..<HASH> 100644
--- a/tests/test_boards.py
+++ b/tests/test_boards.py
@@ -92,6 +92,12 @@ def test_led_board_bad_init():
with pytest.raises(GPIOPinMissing):
leds = LEDBoard(pwm=True)
+def test_led_bar_graph_bad_init():
+ with pytest.raises(GPIOPinMissing):
+ leds = LEDBarGraph()
+ with pytest.raises(GPIOPinMissing):
+ leds = LEDBarGraph(pwm=True)
+
def test_led_board_on_off():
pin1 = Device.pin_factory.pin(2)
pin2 = Device.pin_factory.pin(3)
|
Add tests for bad LEDBarGraph init
|
py
|
diff --git a/nbserverproxy/handlers.py b/nbserverproxy/handlers.py
index <HASH>..<HASH> 100644
--- a/nbserverproxy/handlers.py
+++ b/nbserverproxy/handlers.py
@@ -385,16 +385,18 @@ class SuperviseAndProxyHandler(LocalProxyHandler):
self.log.info('Starting process...')
proc.set_exit_callback(exit_callback)
- for i in range(5):
+ for i in range(8):
if (await self.is_running(proc)):
self.log.info('{} startup complete'.format(self.name))
break
# Simple exponential backoff
- wait_time = max(1.4 ** i, 5)
- self.log.debug('Waiting {} before checking if {} is up'.format(wait_time, self.name))
+ wait_time = 1.4 ** i
+ self.log.debug('Waiting {} seconds before checking if {} is up'.format(wait_time, self.name))
await gen.sleep(wait_time)
else:
+ # clear starting state for failed start
self.state.pop('starting', None)
+ # terminate process
proc.terminate()
raise web.HTTPError(500, 'could not start {} in time'.format(self.name))
|
fix exponential backoff for subprocess: max(<I>**i, 5) is always 5 for i < 5
|
py
|
diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py
index <HASH>..<HASH> 100644
--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -47,7 +47,7 @@ def _filter_msg(msg, output_format):
# that make XML report unparseable.
filtered_msg = filter(
lambda x: x in string.printable and x != '\f' and x != '\v',
- msg.decode(errors='ignore'))
+ msg.decode('UTF-8', 'ignore'))
if output_format == 'HTML':
filtered_msg = filtered_msg.replace('"', '"')
return filtered_msg
|
allow run_tests.py to pass under python <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@ setup(
name = 'identity-toolkit-python-client',
packages = packages,
license="Apache 2.0",
- version = '0.1.7',
+ version = '0.1.8',
description = 'Google Identity Toolkit python client library',
author = 'Jin Liu',
url = 'https://github.com/google/identity-toolkit-python-client',
|
Update version number to <I>
|
py
|
diff --git a/boyle/commands.py b/boyle/commands.py
index <HASH>..<HASH> 100644
--- a/boyle/commands.py
+++ b/boyle/commands.py
@@ -12,6 +12,7 @@
#------------------------------------------------------------------------------
import os
+import sys
import shutil
import subprocess
import logging
|
Fix imports in commands.py
|
py
|
diff --git a/parsl/tests/conftest.py b/parsl/tests/conftest.py
index <HASH>..<HASH> 100644
--- a/parsl/tests/conftest.py
+++ b/parsl/tests/conftest.py
@@ -5,6 +5,10 @@ import shutil
import subprocess
from glob import glob
from itertools import chain
+import signal
+import sys
+import threading
+import traceback
import pytest
import _pytest.runner as runner
@@ -16,6 +20,25 @@ from parsl.tests.utils import get_rundir
logger = logging.getLogger('parsl')
+def dumpstacks(sig, frame):
+ s = ''
+ try:
+ thread_names = {thread.ident: thread.name for thread in threading.enumerate()}
+ tf = sys._current_frames()
+ for thread_id, frame in tf.items():
+ s += '\n\nThread: %s (%d)' % (thread_names[thread_id], thread_id)
+ s += ''.join(traceback.format_stack(frame))
+ except Exception:
+ s = traceback.format_exc()
+ with open(os.getenv('HOME') + '/parsl_stack_dump.txt', 'w') as f:
+ f.write(s)
+ print(s)
+
+
+def pytest_sessionstart(session):
+ signal.signal(signal.SIGUSR1, dumpstacks)
+
+
def pytest_addoption(parser):
"""Add parsl-specific command-line options to pytest.
"""
|
Dump stack traces in pytest on SIGUSR1 (#<I>) When testing, dump a stack trace in ~/parsl_stack_dump.txt when SIGUSR1 is received.
|
py
|
diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py
index <HASH>..<HASH> 100644
--- a/tests/test_emr/test_emr.py
+++ b/tests/test_emr/test_emr.py
@@ -364,7 +364,7 @@ def test_cluster_tagging():
cluster = conn.describe_cluster(cluster_id)
cluster.tags.should.have.length_of(2)
- tags = {tag.key: tag.value for tag in cluster.tags}
+ tags = dict((tag.key, tag.value) for tag in cluster.tags)
tags['tag1'].should.equal('val1')
tags['tag2'].should.equal('val2')
@@ -372,5 +372,5 @@ def test_cluster_tagging():
conn.remove_tags(cluster_id, ["tag1"])
cluster = conn.describe_cluster(cluster_id)
cluster.tags.should.have.length_of(1)
- tags = {tag.key: tag.value for tag in cluster.tags}
+ tags = dict((tag.key, tag.value) for tag in cluster.tags)
tags['tag2'].should.equal('val2')
|
No native dict comprehensions in py<I>.
|
py
|
diff --git a/ontobio/__init__.py b/ontobio/__init__.py
index <HASH>..<HASH> 100644
--- a/ontobio/__init__.py
+++ b/ontobio/__init__.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import
-__version__ = '0.2.29'
+__version__ = '0.2.30'
from .ontol_factory import OntologyFactory
from .ontol import Ontology, Synonym, TextDefinition
|
Upgrade to <I>
|
py
|
diff --git a/c3d.py b/c3d.py
index <HASH>..<HASH> 100644
--- a/c3d.py
+++ b/c3d.py
@@ -863,8 +863,10 @@ class Reader(Manager):
self._handle.seek((self.header.data_block - 1) * 512)
for frame_no in range(self.first_frame(), self.last_frame() + 1):
- raw = np.fromfile(self._handle, dtype=point_dtype,
- count=4 * self.header.point_count).reshape((ppf, 4))
+ n = 4 * self.header.point_count
+ raw = np.fromstring(self._handle.read(n * point_bytes),
+ dtype=point_dtype,
+ count=n).reshape((self.point_used, 4))
points[:, :3] = raw[:, :3] * point_scale
|
Use fromstring to read point data.
|
py
|
diff --git a/Qt.py b/Qt.py
index <HASH>..<HASH> 100644
--- a/Qt.py
+++ b/Qt.py
@@ -79,6 +79,9 @@ def _pyside2():
def load_ui(ui_filepath, *args, **kwargs):
"""Wrap QtUiTools.QUiLoader().load()
for compatibility against PyQt5.uic.loadUi()
+
+ Args:
+ ui_filepath (str): The filepath to the .ui file
"""
from PySide2 import QtUiTools
return QtUiTools.QUiLoader().load(ui_filepath)
@@ -99,6 +102,9 @@ def _pyside():
def load_ui(ui_filepath, *args, **kwargs):
"""Wrap QtUiTools.QUiLoader().load()
for compatibility against PyQt5.uic.loadUi()
+
+ Args:
+ ui_filepath (str): The filepath to the .ui file
"""
from PySide import QtUiTools
return QtUiTools.QUiLoader().load(ui_filepath)
|
Added args description to docstrings
|
py
|
diff --git a/drivers/python/rethinkdb/handshake.py b/drivers/python/rethinkdb/handshake.py
index <HASH>..<HASH> 100644
--- a/drivers/python/rethinkdb/handshake.py
+++ b/drivers/python/rethinkdb/handshake.py
@@ -121,7 +121,7 @@ class HandshakeV1_0(object):
elif self._state == 1:
response = response.decode("utf-8")
if response.startswith("ERROR"):
- raise ReqlDriverError("Received an unexpected reply, you may be connected to an earlier version of the RethinkDB server.")
+ raise ReqlDriverError("Received an unexpected reply. You may be attempting to connect to a RethinkDB server that is too old for this driver. The minimum supported server version is 2.3.0.")
json = self._json_decoder.decode(response)
try:
if json["success"] == False:
|
Small change to error message OTS by @danielmewes
|
py
|
diff --git a/billy/site/browse/views.py b/billy/site/browse/views.py
index <HASH>..<HASH> 100644
--- a/billy/site/browse/views.py
+++ b/billy/site/browse/views.py
@@ -415,6 +415,8 @@ def mom_commit(request):
leg2 = db.legislators.find_one({'_id' : leg2 })
actions.append( "Loaded Legislator '%s as `leg2''" % leg2['leg_id'] )
+ # XXX: Re-direct on None
+
merged, remove = merge_legislators( leg1, leg2 )
actions.append( "Merged Legislators as '%s'" % merged['leg_id'] )
|
Adding in a note for myself later
|
py
|
diff --git a/tests.py b/tests.py
index <HASH>..<HASH> 100644
--- a/tests.py
+++ b/tests.py
@@ -4,5 +4,24 @@ from preconditions import PreconditionError, preconditions
class InvalidPreconditionTests (TestCase):
- def test_varargs_in_precondition(self):
- self.assertRaises(PreconditionError, preconditions, lambda *a: a)
+ def test_varargs(self):
+ self.assertRaises(PreconditionError, preconditions, lambda *a: True)
+
+ def test_kwargs(self):
+ self.assertRaises(PreconditionError, preconditions, lambda **kw: True)
+
+ def test_unknown_nondefault_param(self):
+ # The preconditions refer to "x" but are applied to "a, b", so
+ # "x" is unknown:
+ p = preconditions(lambda x: True)
+
+ self.assertRaises(PreconditionError, p, lambda a, b: a+b)
+
+ def test_default_masks_param(self):
+ # Preconditions may have defaults as a hack to bind local
+ # variables (such as when declared syntactically inside loops),
+ # but this "closure hack" must not mask application function
+ # parameter names:
+ p = preconditions(lambda a, b='a stored value': True)
+
+ self.assertRaises(PreconditionError, p, lambda a, b: a+b)
|
Add tests for other precondition definition errors.
|
py
|
diff --git a/tryp/logging.py b/tryp/logging.py
index <HASH>..<HASH> 100644
--- a/tryp/logging.py
+++ b/tryp/logging.py
@@ -22,9 +22,14 @@ class Logger(logging.Logger):
if self.isEnabledFor(DDEBUG):
self._log(DDEBUG, message, args, **kws)
+ def caught_exception(self, when, exc, *a, **kw):
+ headline = 'caught exception during {}'.format(when)
+ self.exception(headline, exc_info=(type(exc), exc, exc.__traceback__))
+
logging.Logger.verbose = Logger.verbose # type: ignore
logging.Logger.ddebug = Logger.ddebug # type: ignore
+logging.Logger.caught_exception = Logger.caught_exception # type: ignore
log = tryp_root_logger = logging.getLogger('tryp')
|
Logger.caught_exception prints exceptions outside of an except handler
|
py
|
diff --git a/script/create-dist.py b/script/create-dist.py
index <HASH>..<HASH> 100755
--- a/script/create-dist.py
+++ b/script/create-dist.py
@@ -38,6 +38,7 @@ TARGET_BINARIES = {
'msvcp120.dll',
'msvcr120.dll',
'node.dll',
+ 'pdf.dll',
'content_resources_200_percent.pak',
'ui_resources_200_percent.pak',
'xinput1_3.dll',
|
Ship pdf.dll in release.
|
py
|
diff --git a/winrm/transport.py b/winrm/transport.py
index <HASH>..<HASH> 100644
--- a/winrm/transport.py
+++ b/winrm/transport.py
@@ -105,7 +105,7 @@ class Transport(object):
else:
if not self.username:
raise InvalidCredentialsError("auth method %s requires a username" % self.auth_method)
- if not self.password:
+ if self.password is None:
raise InvalidCredentialsError("auth method %s requires a password" % self.auth_method)
self.session = None
|
allow blank passwords for ntlm/basic auth
|
py
|
diff --git a/troposphere/ec2.py b/troposphere/ec2.py
index <HASH>..<HASH> 100644
--- a/troposphere/ec2.py
+++ b/troposphere/ec2.py
@@ -3,7 +3,7 @@
#
# See LICENSE file for full license.
-from . import AWSHelperFn, AWSObject, AWSProperty, Ref
+from . import AWSHelperFn, AWSObject, AWSProperty, FindInMap, Ref
from .validators import boolean, integer, integer_range, network_port
@@ -99,7 +99,7 @@ class NetworkInterfaceProperty(AWSProperty):
'DeleteOnTermination': (boolean, False),
'Description': (basestring, False),
'DeviceIndex': (integer, True),
- 'GroupSet': ([basestring, Ref], False),
+ 'GroupSet': ([basestring, FindInMap, Ref], False),
'NetworkInterfaceId': (basestring, False),
'PrivateIpAddress': (basestring, False),
'PrivateIpAddresses': ([PrivateIpAddressSpecification], False),
|
Allow FindInMap to be used with ec2.NetworkInterfaceProperty.GroupSet Fixes #<I>. Note: may need to think about how to address this in a more generic way instead of specifying on a per-property approach.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ install_requires = [
setup(
name='Attitude',
- version="0.1.1",
+ version="0.1.2",
description="Attitude computes the orientations of planes from point data.",
license='MIT',
keywords='gis data computation fitting statistics vector science geology',
|
The one fix itself is worthy of a release.
|
py
|
diff --git a/examples/csv_gen/sccsv/generator.py b/examples/csv_gen/sccsv/generator.py
index <HASH>..<HASH> 100644
--- a/examples/csv_gen/sccsv/generator.py
+++ b/examples/csv_gen/sccsv/generator.py
@@ -45,7 +45,8 @@ def gen_csv(sc, filename, field_list, source, filters):
# First thing we need to do is initialize the csvfile and build the header
# for the file.
- csvfile = csv.writer(open(filename, 'wb'))
+ datafile = open(filename, 'wb')
+ csvfile = csv.writer(datafile)
header = []
for field in field_list:
header.append(fields.fields[field]['name'])
@@ -59,4 +60,7 @@ def gen_csv(sc, filename, field_list, source, filters):
fparams = {'fobj': csvfile, 'flist': field_list}
sc.query('vulndetails', source=source,
func=writer, func_params=fparams, **filters)
- debug.write('\n')
\ No newline at end of file
+ debug.write('\n')
+
+ # Lastly we need to close the datafile.
+ datafile.close()
\ No newline at end of file
|
Adjusted the generator to properly close the file.
|
py
|
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -46,7 +46,7 @@ master_doc = 'index'
# General information about the project.
project = u'BoxPacker'
-copyright = u'2018, Doug Wright'
+copyright = u'2019, Doug Wright'
author = u'Doug Wright'
# The version info for the project you're documenting, acts as replacement for
|
Bump copyright year in docs
|
py
|
diff --git a/hydra_base/lib/template.py b/hydra_base/lib/template.py
index <HASH>..<HASH> 100644
--- a/hydra_base/lib/template.py
+++ b/hydra_base/lib/template.py
@@ -1029,7 +1029,7 @@ def _set_typeattr(typeattr, existing_ta = None):
ta.data_restriction = _parse_data_restriction(typeattr.data_restriction)
- if typeattr.dimension is not None and typeattr.attr_id is not None:
+ if typeattr.dimension is not None and typeattr.attr_id is not None and typeattr.attr_id > 0:
attr = ta.get_attr()
if attr.attr_dimen != typeattr.dimension:
raise HydraError("Cannot set a dimension on type attribute which "
|
ignore negative ids for resource attrs in attr_ids
|
py
|
diff --git a/spyderlib/widgets/externalshell/startup.py b/spyderlib/widgets/externalshell/startup.py
index <HASH>..<HASH> 100644
--- a/spyderlib/widgets/externalshell/startup.py
+++ b/spyderlib/widgets/externalshell/startup.py
@@ -21,13 +21,11 @@ def __is_ipython():
return os.environ.get('IPYTHON')
def __patching_matplotlib__():
- import imp
try:
- imp.find_module('matplotlib')
+ from spyderlib import mpl_patch
+ mpl_patch.apply()
except ImportError:
return
- from spyderlib import mpl_patch
- mpl_patch.apply()
def __create_banner():
"""Create shell banner"""
@@ -52,7 +50,7 @@ if __name__ == "__main__":
__run_pythonstartup_script()
__patching_matplotlib__()
-
+
for _name in ['__run_pythonstartup_script', '__run_init_commands',
'__create_banner', '__commands__', 'command', '__file__',
'__remove_sys_argv__', '__patching_matplotlib__']+['_name']:
|
External console's startup: avoid error when matplotlib's patch can't be imported
|
py
|
diff --git a/greg/classes.py b/greg/classes.py
index <HASH>..<HASH> 100644
--- a/greg/classes.py
+++ b/greg/classes.py
@@ -33,6 +33,7 @@ import json
from pkg_resources import resource_filename
from urllib.parse import urlparse
from urllib.error import URLError
+from warnings import warn
import greg.aux_functions as aux
@@ -110,10 +111,14 @@ class Feed():
if self.willtag:
self.defaulttagdict = self.default_tag_dict()
self.mime = self.retrieve_mime()
- try:
- self.wentwrong = str(self.podcast["bozo_exception"])
- except KeyError:
- self.wentwrong = False
+ self.wentwrong = False
+ if self.podcast.bozo: # the bozo bit is on, see feedparser docs
+ warning = str(self.podcast["bozo_exception"])
+ if "URLError" in warning:
+ self.wentwrong = warning
+ else:
+ warn("""This feed is malformed (possibly in unimportant ways):
+ {}""".format(warning), stacklevel=10)
self.info = os.path.join(session.data_dir, feed)
self.entrylinks, self.linkdates = aux.parse_feed_info(self.info)
|
Only issue a warning unless the bozo exception is URLError
|
py
|
diff --git a/ella/__init__.py b/ella/__init__.py
index <HASH>..<HASH> 100644
--- a/ella/__init__.py
+++ b/ella/__init__.py
@@ -1,4 +1,4 @@
-VERSION = (3, 0, 5)
+VERSION = (3, 0, 6)
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION))
|
version bump <I> so we can go on with new django_appdata
|
py
|
diff --git a/glances/plugins/glances_processlist.py b/glances/plugins/glances_processlist.py
index <HASH>..<HASH> 100644
--- a/glances/plugins/glances_processlist.py
+++ b/glances/plugins/glances_processlist.py
@@ -411,7 +411,7 @@ class Plugin(GlancesPlugin):
msg = '{0:>6}'.format('IOW/s')
ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'io_counters' else 'DEFAULT', optional=True, additional=True))
msg = ' {0:8}'.format('Command')
- ret.append(self.curse_add_line(msg))
+ ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'name' else 'DEFAULT'))
if glances_processes.is_tree_enabled():
ret.extend(self.get_process_tree_curses_data(
|
Solve issue: Process list header not decorating when sorting by command #<I>
|
py
|
diff --git a/pyemma/_base/progress/reporter.py b/pyemma/_base/progress/reporter.py
index <HASH>..<HASH> 100644
--- a/pyemma/_base/progress/reporter.py
+++ b/pyemma/_base/progress/reporter.py
@@ -60,15 +60,15 @@ class ProgressReporter(object):
def _prog_rep_descriptions(self):
# stores progressbar description strings per stage. Can contain format parameters
if not hasattr(self, '_ProgressReporter__prog_rep_descriptions'):
- self.__prog_rep_descriptions = {}#defaultdict(str)
+ self.__prog_rep_descriptions = {}
return self.__prog_rep_descriptions
@property
def _prog_rep_callbacks(self):
# store callback by stage
- if not hasattr(self, '_ProgressReporter__callbacks'):
- self.__callbacks = {}
- return self.__callbacks
+ if not hasattr(self, '_ProgressReporter__prog_rep_callbacks'):
+ self.__prog_rep_callbacks = {}
+ return self.__prog_rep_callbacks
def _progress_register(self, amount_of_work, description='', stage=0):
""" Registers a progress which can be reported/displayed via a progress bar.
|
[reporter] renamed private attribute to match name-style of other attributes.
|
py
|
diff --git a/src/requirementslib/models/requirements.py b/src/requirementslib/models/requirements.py
index <HASH>..<HASH> 100644
--- a/src/requirementslib/models/requirements.py
+++ b/src/requirementslib/models/requirements.py
@@ -1570,6 +1570,10 @@ class VCSRequirement(FileRequirement):
ref = None
if "@" in link.path and "@" in uri:
uri, _, ref = uri.rpartition("@")
+ if path is not None and "@" in path:
+ path, _ref = path.rsplit("@", 1)
+ if ref is None:
+ ref = _ref
if relpath and "@" in relpath:
relpath, ref = relpath.rsplit("@", 1)
creation_args = {
|
Small adjustment to path fix for vcs requirements
|
py
|
diff --git a/epub_date.py b/epub_date.py
index <HASH>..<HASH> 100644
--- a/epub_date.py
+++ b/epub_date.py
@@ -55,7 +55,7 @@ class DateInfo(object):
monthlist = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November',
'December']
- retstr = '{0} {1}, {2}'.format(monthlist[self.month], self.day,
+ retstr = '{0} {1}, {2}'.format(monthlist[self.month - 1], self.day,
self.year)
return retstr
|
Fixed an issue with month indexing in niceString()
|
py
|
diff --git a/src/canmatrix/tests/test_frame_encoding.py b/src/canmatrix/tests/test_frame_encoding.py
index <HASH>..<HASH> 100644
--- a/src/canmatrix/tests/test_frame_encoding.py
+++ b/src/canmatrix/tests/test_frame_encoding.py
@@ -12,7 +12,7 @@ def loadDbc():
return canmatrix.formats.loadp(os.path.join(here ,"test_frame_decoding.dbc"), flatImport = True)
-def test_encode_with_dbc_little_endian():
+def test_encode_with_dbc_big_endian():
cm = loadDbc()
# 002#0C00057003CD1F83
frame = cm.frameById(1)
|
Fix test method name to actually execute the function. (#<I>)
|
py
|
diff --git a/peyotl/nexson_syntax/__init__.py b/peyotl/nexson_syntax/__init__.py
index <HASH>..<HASH> 100755
--- a/peyotl/nexson_syntax/__init__.py
+++ b/peyotl/nexson_syntax/__init__.py
@@ -187,12 +187,19 @@ def convert_nexson_format(blob,
return blob
def write_as_json(blob, dest, indent=0, sort_keys=True):
+ opened_out = False
if isinstance(dest, str) or isinstance(dest, unicode):
out = codecs.open(dest, mode='w', encoding='utf-8')
+ opened_out = True
else:
out = dest
- json.dump(blob, out, indent=indent, sort_keys=sort_keys)
- out.write('\n')
+ try:
+ json.dump(blob, out, indent=indent, sort_keys=sort_keys)
+ out.write('\n')
+ finally:
+ out.flush()
+ if opened_out:
+ out.close()
def read_as_json(infi):
inpf = codecs.open(infi)
|
flushing buffer in write_as_json
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup
setup (
name='jinja2-highlight',
- version='0.2.2',
+ version='0.3',
description='Jinja2 extension to highlight source code using Pygments',
keywords = 'syntax highlighting',
author='Tasos Latsas',
|
make release <I> to include readme file in package
|
py
|
diff --git a/checkers/stdlib.py b/checkers/stdlib.py
index <HASH>..<HASH> 100644
--- a/checkers/stdlib.py
+++ b/checkers/stdlib.py
@@ -42,7 +42,7 @@ class OpenModeChecker(BaseChecker):
'See http://docs.python.org/2/library/functions.html#open'),
}
- @utils.check_messages('W1501')
+ @utils.check_messages('bad-open-mode')
def visit_callfunc(self, node):
"""Visit a CallFunc node."""
if hasattr(node, 'func'):
@@ -59,7 +59,7 @@ class OpenModeChecker(BaseChecker):
mode_arg = utils.safe_infer(mode_arg)
if (isinstance(mode_arg, astroid.Const)
and not re.match(_VALID_OPEN_MODE_REGEX, mode_arg.value)):
- self.add_message('W1501', node=node, args=(mode_arg.value))
+ self.add_message('bad-open-mode', node=node, args=(mode_arg.value))
except (utils.NoSuchArgumentError, TypeError):
pass
|
Only emit symbolic warnings from the stdlib checker.
|
py
|
diff --git a/Lib/mutatorMath/test/ufo/test.py b/Lib/mutatorMath/test/ufo/test.py
index <HASH>..<HASH> 100644
--- a/Lib/mutatorMath/test/ufo/test.py
+++ b/Lib/mutatorMath/test/ufo/test.py
@@ -449,7 +449,11 @@ if __name__ == "__main__":
>>> bias, mut = buildMutator(items, w)
>>> assert bias == Location(aaaa=50)
- >>> assert mut.keys() == [(('aaaa', 100),), (('aaaa', 50),), ()]
+ >>> expect = [(('aaaa', 100),), (('aaaa', 50),), ()]
+ >>> expect.sort()
+ >>> got = mut.keys()
+ >>> got.sort()
+ >>> assert got == expect
>>> assert mut.makeInstance(Location(aaaa=300)) == 0
>>> assert mut.makeInstance(Location(aaaa=400)) == 50
>>> assert mut.makeInstance(Location(aaaa=700)) == 100
|
Test failed as it relied on the order of keys returned from a dict.
|
py
|
diff --git a/ospd/ospd.py b/ospd/ospd.py
index <HASH>..<HASH> 100644
--- a/ospd/ospd.py
+++ b/ospd/ospd.py
@@ -1051,7 +1051,11 @@ class OSPDaemon:
try:
output = subprocess.check_output(cmd)
- except subprocess.CalledProcessError as e:
+ except (
+ subprocess.CalledProcessError,
+ PermissionError,
+ FileNotFoundError,
+ ) as e:
raise OspdCommandError(
'Bogus get_performance format. %s' % e,
'get_performance'
|
Add exception for PermissionError and FileNotFoundError.
|
py
|
diff --git a/test/acceptance/test_cli.py b/test/acceptance/test_cli.py
index <HASH>..<HASH> 100644
--- a/test/acceptance/test_cli.py
+++ b/test/acceptance/test_cli.py
@@ -6,13 +6,9 @@ import sys
class TestCLI(unittest.TestCase):
- def assertRegex(self, string, pattern):
- assertRegexpMatches = getattr(self, 'assertRegexpMatches', None)
- if assertRegexpMatches:
- assertRegexpMatches(string, pattern)
- return
-
- super(TestCLI, self).assertRegex(string, pattern)
+ if sys.version_info <= (3,):
+ def assertRegex(self, string, pattern):
+ return super(TestCLI, self).assertRegexpMatches(string, pattern)
def assertReturnedStdoutEqual(self, expected_stdout, args):
@@ -139,7 +135,7 @@ class TestCLI(unittest.TestCase):
got_output = context_manager.exception.output
- expected_output_pattern = '\\033\['
+ expected_output_pattern = r'\033\['
self.assertRegex(got_output, expected_output_pattern)
|
tests: fix some DeprecationWarnings
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
from setuptools import setup, find_packages
setup(name='amusement',
- version='0.1.1',
+ version='0.1.2',
description='A python package and CLI to get wait times for rides at various theme parks',
url='http://github.com/astephen2/amusement',
author='Alex Stephen',
|
Pushed up to PyPI so I had to change the version number
|
py
|
diff --git a/wikitextparser/_wikitext.py b/wikitextparser/_wikitext.py
index <HASH>..<HASH> 100644
--- a/wikitextparser/_wikitext.py
+++ b/wikitextparser/_wikitext.py
@@ -897,7 +897,7 @@ class WikiText:
return_spans.sort()
spans.sort()
if not recursive:
- return_spans = _filter_inner_spans(return_spans)
+ return_spans = _outer_spans(return_spans)
return [
Table(lststr, type_to_spans, sp, 'Table') for sp in return_spans]
@@ -1124,11 +1124,12 @@ class SubWikiText(WikiText):
return None
-def _filter_inner_spans(sorted_spans: List[List[int]]) -> Iterable[List[int]]:
+def _outer_spans(sorted_spans: List[List[int]]) -> Iterable[List[int]]:
"""Yield the outermost intervals."""
for i, span in enumerate(sorted_spans):
+ se = span[1]
for ps, pe in islice(sorted_spans, None, i):
- if span[1] < pe:
+ if se < pe:
break
else: # none of the previous spans included span
yield span
|
rename _filter_inner_spans to _outer_spans
|
py
|
diff --git a/glue/ligolw/lsctables.py b/glue/ligolw/lsctables.py
index <HASH>..<HASH> 100644
--- a/glue/ligolw/lsctables.py
+++ b/glue/ligolw/lsctables.py
@@ -1153,6 +1153,8 @@ class SimInspiralTable(table.Table):
"numrel_mode_max": "int_4s",
"numrel_data": "lstring",
"amp_order": "int_4s",
+ "taper": "lstring",
+ "bandpass": "int_4s",
"simulation_id": "ilwd:char"
}
constraints = "PRIMARY KEY (simulation_id)"
|
Added the new columns. Kipp has already approved the changes.
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.