diff (stringlengths 139–3.65k) | message (stringlengths 8–627) | diff_languages (stringclasses 1) |
---|---|---|
diff --git a/autolens/autopipe/non_linear.py b/autolens/autopipe/non_linear.py
index <HASH>..<HASH> 100644
--- a/autolens/autopipe/non_linear.py
+++ b/autolens/autopipe/non_linear.py
@@ -5,6 +5,7 @@ import math
import os
import pymultinest
import scipy.optimize
+import numpy as np
from autolens.imaging import hyper_image
from autolens.config import config
from autolens.autopipe import model_mapper as mm
@@ -246,7 +247,7 @@ class MultiNest(NonLinearOptimizer):
self.result = None
self.instance_from_physical_vector = instance_from_physical_vector
self.constant = constant
- self.max_likelihood = 0.
+ self.max_likelihood = -np.inf
def __call__(self, cube, ndim, nparams, lnew):
instance = self.instance_from_physical_vector(cube)
|
Max likelihood in multinest now -inf
|
py
|
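The change above swaps a 0.0 starting value for negative infinity. MultiNest works with log-likelihoods, which are typically negative, so a running maximum seeded with 0.0 would never be updated. A minimal sketch of the idea (hypothetical values, not the autolens fitness API):

```python
import numpy as np

def running_max(log_likelihoods):
    """Track the largest log-likelihood seen so far."""
    best = -np.inf  # seeding with 0. would never be beaten by all-negative values
    for value in log_likelihoods:
        if value > best:
            best = value
    return best

print(running_max([-152.3, -148.9, -150.1]))  # -148.9
```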
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ __version__ = '0.98pre'
setup(name='visidata',
version=__version__,
- install_requires='python-dateutil openpyxl xlrd h5py psycopg2 pyshp'.split(),
+ install_requires='python-dateutil openpyxl xlrd h5py psycopg2 pyshp mapbox-vector-tile'.split(),
description='curses interface for exploring and arranging tabular data',
long_description=open('README.md').read(),
author='Saul Pwanson',
|
Add mapbox-vector-tile to setup requirements
|
py
|
diff --git a/angr/exploration_techniques/tracer.py b/angr/exploration_techniques/tracer.py
index <HASH>..<HASH> 100644
--- a/angr/exploration_techniques/tracer.py
+++ b/angr/exploration_techniques/tracer.py
@@ -98,7 +98,7 @@ class Tracer(ExplorationTechnique):
def step(self, simgr, stash='active', **kwargs):
simgr.drop(stash='missed')
- return simgr.step(stash, **kwargs)
+ return simgr.step(stash=stash, **kwargs)
def step_state(self, simgr, state, **kwargs):
# maintain the predecessors list
|
Fix incorrect argument passing (stash was passed positionally, binding to n).
|
py
|
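The one-line fix above passes stash by keyword. Per the commit message, the first positional parameter of step() is not the stash, so a positional call binds the value to the wrong name. A sketch of the pitfall with a hypothetical signature (not angr's real one):

```python
def step(n=None, stash='active', **kwargs):
    # hypothetical signature: the first positional slot is a step count, not the stash
    return n, stash

print(step('deadended'))        # ('deadended', 'active') -- stash silently stays default
print(step(stash='deadended'))  # (None, 'deadended')     -- the intended parameter is set
```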
diff --git a/iktomi/unstable/utils/image_resizers.py b/iktomi/unstable/utils/image_resizers.py
index <HASH>..<HASH> 100644
--- a/iktomi/unstable/utils/image_resizers.py
+++ b/iktomi/unstable/utils/image_resizers.py
@@ -100,13 +100,14 @@ class ResizeCrop(Resizer):
class ResizeMixed(Resizer):
- def __init__(self, hor_resize, vert_resize):
+ def __init__(self, hor_resize, vert_resize, rate=1):
self.hor_resize = hor_resize
self.vert_resize = vert_resize
+ self.rate = rate
def get_resizer(self, size, target_size):
sw, sh = size
- if sw >= sh:
+ if sw >= sh * self.rate:
return self.hor_resize
else:
return self.vert_resize
|
ResizeMixed now accepts rate=1 arg
|
py
|
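The new rate argument shifts the aspect-ratio threshold at which the horizontal resizer is picked; the default of 1 preserves the old "wider than tall" behaviour. A small sketch of the selection rule, with strings standing in for the two resizers:

```python
def pick_resizer(size, rate=1):
    """Return 'horizontal' when width >= height * rate, else 'vertical'."""
    sw, sh = size
    return "horizontal" if sw >= sh * rate else "vertical"

print(pick_resizer((800, 600)))             # horizontal (rate=1, old behaviour)
print(pick_resizer((800, 600), rate=1.5))   # vertical: 800 < 600 * 1.5
print(pick_resizer((1000, 600), rate=1.5))  # horizontal: 1000 >= 900
```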
diff --git a/mixbox/fields.py b/mixbox/fields.py
index <HASH>..<HASH> 100644
--- a/mixbox/fields.py
+++ b/mixbox/fields.py
@@ -42,7 +42,7 @@ def _import_class(classpath):
Raises:
ImportError: If an error occurs while importing the module.
- KeyError: IF the class does not exist in the imported module.
+ AttributeError: IF the class does not exist in the imported module.
"""
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
|
Fixed Exception type in Raises section of _import_class docstring.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -27,6 +27,8 @@ except ImportError as exp:
if (sys.version_info[0]) < 3:
print("using distutils. install setuptools for more options", file=sys.stderr)
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
import distutilazy
import distutilazy.clean
|
Make sure setup.py imports distutilazy from current package
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@ else:
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
- install_requires=['numpy', 'scipy', 'matplotlib>2.2,<=2.2.4', 'matplotlib-scalebar', 'future', 'pandas==0.23.4', 'bokeh==2.1.1'],
+ install_requires=['numpy', 'scipy', 'matplotlib', 'matplotlib-scalebar', 'future', 'pandas', 'bokeh'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
|
Removed specific version requirements from dependencies
|
py
|
diff --git a/mavproxy.py b/mavproxy.py
index <HASH>..<HASH> 100755
--- a/mavproxy.py
+++ b/mavproxy.py
@@ -1342,9 +1342,10 @@ def set_stream_rates():
rate = mpstate.settings.streamrate
else:
rate = mpstate.settings.streamrate2
- master.mav.request_data_stream_send(mpstate.status.target_system, mpstate.status.target_component,
- mavutil.mavlink.MAV_DATA_STREAM_ALL,
- rate, 1)
+ if rate != -1:
+ master.mav.request_data_stream_send(mpstate.status.target_system, mpstate.status.target_component,
+ mavutil.mavlink.MAV_DATA_STREAM_ALL,
+ rate, 1)
def check_link_status():
'''check status of master links'''
|
use streamrate of -1 to mean to not set stream rates
|
py
|
diff --git a/stripy-src/setup.py b/stripy-src/setup.py
index <HASH>..<HASH> 100755
--- a/stripy-src/setup.py
+++ b/stripy-src/setup.py
@@ -39,16 +39,15 @@ if __name__ == "__main__":
url = "https://github.com/underworldcode/stripy",
version = "0.7.0",
description = "Python interface to TRIPACK and STRIPACK fortran code for triangulation/interpolation in Cartesian coordinates and on a sphere",
- long_description=long_description,
+ long_description = long_description,
long_description_content_type='text/markdown',
ext_modules = [ext1, ext2, ext3, ext4],
packages = ['stripy'],
- install_requires = ['numpy'],
+ install_requires = ['numpy', 'scipy>=0.15.0'],
package_data = {'stripy': ['Notebooks/CartesianTriangulations/*ipynb', # Worked Examples is not currently used
'Notebooks/SphericalTriangulations/*ipynb',
'Notebooks/Data/*'] },
include_package_data = True,
- install_requires = ['numpy', 'scipy'],
classifiers = ['Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
|
Bug in setup.py. Also force installation of scipy version >= <I>
|
py
|
diff --git a/luigi/worker.py b/luigi/worker.py
index <HASH>..<HASH> 100644
--- a/luigi/worker.py
+++ b/luigi/worker.py
@@ -197,9 +197,9 @@ class Worker(object):
self.__scheduled_tasks[task.task_id] = task
deps = task.deps()
for d in deps:
- self._validate_dependency(task)
+ self._validate_dependency(d)
- deps = [d.task_id for d in task.deps()]
+ deps = [d.task_id for d in deps]
self.__scheduler.add_task(self.__id, task.task_id, status=PENDING,
deps=deps, runnable=True)
logger.info('Scheduled %s' % task.task_id)
|
fixed bug in dependency validation; error messages should be a lot more helpful now
|
py
|
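The bug was that the loop validated the parent task on each iteration instead of the dependency d, so a bad dependency produced an error about the wrong object. A sketch of the before/after behaviour with stand-in objects rather than luigi's real classes:

```python
class Task:
    def __init__(self, task_id, deps=()):
        self.task_id = task_id
        self._deps = list(deps)

    def deps(self):
        return self._deps

def validate_dependency(obj):
    # stand-in for Worker._validate_dependency
    if not isinstance(obj, Task):
        raise Exception("requires Tasks, got %r" % (obj,))

parent = Task("parent", deps=[Task("ok"), "not-a-task"])

# buggy loop: always checks the parent, so the broken dependency slips through
for d in parent.deps():
    validate_dependency(parent)

# fixed loop: checks each dependency, and the error names the real culprit
for d in parent.deps():
    try:
        validate_dependency(d)
    except Exception as exc:
        print(exc)  # requires Tasks, got 'not-a-task'
```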
diff --git a/pyethereum/ethclient.py b/pyethereum/ethclient.py
index <HASH>..<HASH> 100755
--- a/pyethereum/ethclient.py
+++ b/pyethereum/ethclient.py
@@ -7,11 +7,15 @@ from docopt import docopt
import utils
import transactions
from . import __version__
+from . config import read_config
-api_path = '/api/v02a'
+config = read_config()
-DEFAULT_HOST = 'localhost'
-DEFAULT_PORT = 30203
+api_path = config.get('api', 'api_path')
+assert api_path.startswith('/') and not api_path.endswith('/')
+
+DEFAULT_HOST = config.get('api', 'listen_host')
+DEFAULT_PORT = config.getint('api', 'listen_port')
DEFAULT_GASPRICE = 10**12
DEFAULT_STARTGAS = 10000
|
use config in ethclient
|
py
|
diff --git a/aigerbv/expr.py b/aigerbv/expr.py
index <HASH>..<HASH> 100644
--- a/aigerbv/expr.py
+++ b/aigerbv/expr.py
@@ -12,7 +12,8 @@ def constk(k, size=None):
nonlocal size
if size is None:
size = expr.size
- return cmn.source(size, k, signed=False)
+ return cmn.source(size, k, signed=False) \
+ | cmn.sink(expr.size, expr.inputs)
return _constk
@@ -177,8 +178,14 @@ def ite(test, expr_true, expr_false):
return (~test | expr_true) & (test | expr_false)
-def atom(wordlen: int, val: Union[str, int], signed: bool = True) -> Expr:
+Val = Union[str, int, None]
+
+
+def atom(wordlen: int, val: Val, signed: bool = True) -> Expr:
output = cmn._fresh()
+ if val is None:
+ val = cmn._fresh()
+
if isinstance(val, str):
aig = cmn.identity_gate(wordlen, val, output)
else:
|
Maintain #inputs in x ^ x and support var=None in atom
|
py
|
diff --git a/bumpy.py b/bumpy.py
index <HASH>..<HASH> 100644
--- a/bumpy.py
+++ b/bumpy.py
@@ -22,6 +22,7 @@ LOCALE = {
'help_aliases': '\taliases: {}',
'help_requires': '\trequires: {}',
'help_unknown': 'unknown task: {}',
+ 'shell': '$ {}',
}
LIST = []
@@ -210,6 +211,10 @@ def valid(*things):
return req.valid
def shell(command):
+ global CONFIG
+ if 'shell' not in CONFIG['suppress']:
+ print LOCALE['shell'].format(command)
+
try:
return subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError, ex:
|
Make `shell(...)` print out the command before executing
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ from setuptools import setup
setup(
name='Flask-MarrowMailer',
- version='0.2.0',
+ version='0.3.0',
url='http://github.com/miguelgrinberg/Flask-MarrowMailer/',
license='MIT',
author='Miguel Grinberg',
@@ -23,7 +23,6 @@ setup(
install_requires=[
'Flask',
'marrow.mailer',
- 'futures'
],
test_suite = "test_marrowmailer",
classifiers=[
|
remove not needed futures as a dependency
|
py
|
diff --git a/bitshares/memo.py b/bitshares/memo.py
index <HASH>..<HASH> 100644
--- a/bitshares/memo.py
+++ b/bitshares/memo.py
@@ -73,11 +73,14 @@ class Memo(object):
if not memo_wif:
raise MissingKeyError("Memo key for %s missing!" % self.from_account["name"])
+ if not hasattr(self, 'chain_prefix'):
+ self.chain_prefix = self.blockchain.prefix
+
enc = BtsMemo.encode_memo(
PrivateKey(memo_wif),
PublicKey(
self.to_account["options"]["memo_key"],
- prefix=self.blockchain.prefix
+ prefix=self.chain_prefix
),
nonce,
memo
@@ -120,9 +123,12 @@ class Memo(object):
"Need any of {}".format(
[memo["to"], memo["from"]]))
+ if not hasattr(self, 'chain_prefix'):
+ self.chain_prefix = self.blockchain.prefix
+
return BtsMemo.decode_memo(
PrivateKey(memo_wif),
- PublicKey(pubkey, prefix=self.blockchain.prefix),
+ PublicKey(pubkey, prefix=self.chain_prefix),
memo.get("nonce"),
memo.get("message")
)
|
Allow chain_prefix in Memo to be overwritten. This is useful for off-line mode.
|
py
|
diff --git a/test/test_models.py b/test/test_models.py
index <HASH>..<HASH> 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -603,7 +603,7 @@ def test_classification_model(model_fn, dev):
"input_shape": (1, 3, 224, 224),
}
model_name = model_fn.__name__
- if dev == "cuda" and SKIP_BIG_MODEL and model_name in skipped_big_models:
+ if SKIP_BIG_MODEL and model_name in skipped_big_models:
pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
kwargs = {**defaults, **_model_params.get(model_name, {})}
num_classes = kwargs.get("num_classes")
|
Skip big models on both cpu and gpu test to fix CI(#<I>)
|
py
|
diff --git a/epdb/epdb.py b/epdb/epdb.py
index <HASH>..<HASH> 100755
--- a/epdb/epdb.py
+++ b/epdb/epdb.py
@@ -13,6 +13,7 @@
""" Extended pdb """
import bdb
+import stackutil
import inspect
import pdb
import os
@@ -32,9 +33,8 @@ import tempfile
import traceback
try:
- from epdb import telnetserver
- from epdb import telnetclient
- from epdb import stackutil
+ import telnetserver
+ import telnetclient
hasTelnet = True
except ImportError:
hasTelnet = False
|
Undo change to absolute imports; it breaks on py<I>
|
py
|
diff --git a/netshowlib/linux/iface.py b/netshowlib/linux/iface.py
index <HASH>..<HASH> 100644
--- a/netshowlib/linux/iface.py
+++ b/netshowlib/linux/iface.py
@@ -9,6 +9,7 @@ import netshowlib.linux.ipaddr as ipaddr
import os
import re
from datetime import datetime
+import netshowlib.linux.stp.kernel as kernel_stp
"""
Variables for port type bitmap entry
@@ -93,7 +94,7 @@ class Iface(object):
# -----------------------
- def read_strsymlink(self, attr):
+ def read_symlink(self, attr):
"""
:return symlink under a /sys/class/net iface config.
"""
|
fix typo in read_symlink
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,7 @@ import shutil
import glob
import multiprocessing
from distutils.command.build import build as st_build
+from distutils.util import get_platform
from setuptools import setup
from setuptools.command.develop import develop as st_develop
@@ -122,6 +123,10 @@ cmdclass = {
'sdist': sdist,
}
+if "bdist_wheel" in sys.argv and "--plat-name" not in sys.argv:
+ sys.argv.append("--plat-name")
+ sys.argv.append(get_platform())
+
setup(
cmdclass=cmdclass,
)
|
Re-add platform tag to setup.py
|
py
|
diff --git a/src/drivers/PiBot/real/piBot.py b/src/drivers/PiBot/real/piBot.py
index <HASH>..<HASH> 100644
--- a/src/drivers/PiBot/real/piBot.py
+++ b/src/drivers/PiBot/real/piBot.py
@@ -267,7 +267,7 @@ class PiBot:
DcMin = 29
DcMax = 971
Pi = 3.1416
- FullCircle = 360 #2 * Pi
+ FullCircle = 2 * Pi
DutyScale = 1000
Q2Min = FullCircle / 4 #angulo minimo perteneciente al segundo cuadrante
Q3Max = Q2Min * 3 #angulo maximo perteneciente al tercer cuadrante
|
"leerangulorueda" function added to the driver pibot.py
|
py
|
diff --git a/kafka_utils/util/client.py b/kafka_utils/util/client.py
index <HASH>..<HASH> 100644
--- a/kafka_utils/util/client.py
+++ b/kafka_utils/util/client.py
@@ -24,7 +24,7 @@ from retrying import retry
from kafka_utils.util.protocol import KafkaToolProtocol
-RETRY_ATTEMPTS = 5
+RETRY_ATTEMPTS = 10
WAIT_BEFORE_RETRYING = 2 * 1000
|
increasing the retry attempts to <I> from 5, now that acceptance tests actually start at copy_group with kafka storage; because it is just starting up, it uses up way more retries, it seems
|
py
|
diff --git a/compliance_checker/cf/cf.py b/compliance_checker/cf/cf.py
index <HASH>..<HASH> 100644
--- a/compliance_checker/cf/cf.py
+++ b/compliance_checker/cf/cf.py
@@ -499,6 +499,10 @@ class CFBaseCheck(BaseCheck):
if hasattr(v, 'standard_name') and 'status_flag' in v.standard_name:
continue
+
+ # skip DSG cf_role
+ if hasattr(v, "cf_role"):
+ continue
units = getattr(v, 'units', None)
|
Skip cf_role attr'd vars in check_units, fixes #<I>
|
py
|
diff --git a/axiom/test/test_upgrading.py b/axiom/test/test_upgrading.py
index <HASH>..<HASH> 100644
--- a/axiom/test/test_upgrading.py
+++ b/axiom/test/test_upgrading.py
@@ -411,10 +411,18 @@ class SubStoreCompat(SwordUpgradeTest):
return self.currentSubStore
def closeStore(self):
- self.currentSubStore.close()
- self.currentTopStore.close()
- self.currentSubStore = None
- self.currentTopStore = None
+ service = IService(self.currentTopStore)
+ if service.running:
+ result = service.stopService()
+ else:
+ result = succeed(None)
+ def stopped(ignored):
+ self.currentSubStore.close()
+ self.currentTopStore.close()
+ self.currentSubStore = None
+ self.currentTopStore = None
+ result.addCallback(stopped)
+ return result
def startStoreService(self):
svc = IService(self.currentTopStore)
|
Apply the same stop service logic to the closeStore method of the SubStore version of the tests
|
py
|
diff --git a/ldapcherry/backend/backendAD.py b/ldapcherry/backend/backendAD.py
index <HASH>..<HASH> 100644
--- a/ldapcherry/backend/backendAD.py
+++ b/ldapcherry/backend/backendAD.py
@@ -23,12 +23,16 @@ class CaFileDontExist(Exception):
self.log = "CA file %(cafile)s don't exist" % {'cafile': cafile}
+class MissingAttr(Exception):
+ def __init__(self):
+ self.log = 'attributes "cn" and "unicodePwd" must be declared ' \
+ 'in attributes.yml for all Active Directory backends.'
+
NO_ATTR = 0
DISPLAYED_ATTRS = 1
LISTED_ATTRS = 2
ALL_ATTRS = 3
-
# UserAccountControl Attribute/Flag Values
# For details, look at:
# https://support.microsoft.com/en-us/kb/305144
@@ -139,6 +143,12 @@ class Backend(ldapcherry.backend.backendLdap.Backend):
for a in attrslist:
self.attrlist.append(self._str(a))
+ if 'cn' not in self.attrlist:
+ raise MissingAttr()
+
+ if 'unicodePwd' not in self.attrlist:
+ raise MissingAttr()
+
def _search_group(self, searchfilter, groupdn):
searchfilter = self._str(searchfilter)
ldap_client = self._bind()
|
making some attributes explicitly mandatory for Active Directory backend
|
py
|
diff --git a/test/testBase.py b/test/testBase.py
index <HASH>..<HASH> 100644
--- a/test/testBase.py
+++ b/test/testBase.py
@@ -1,4 +1,7 @@
import unittest
+import sys
+import os
+sys.path.insert(0, os.path.abspath('..'))
from observed import event
|
Modify system path so that tests can import from source distribution
|
py
|
diff --git a/isort/isort.py b/isort/isort.py
index <HASH>..<HASH> 100644
--- a/isort/isort.py
+++ b/isort/isort.py
@@ -902,7 +902,8 @@ class SortImports(object):
self.comments['straight'][module] = comments
comments = None
- if len(self.out_lines) > max(self.import_index, self._first_comment_index_end, 1) - 1:
+ if len(self.out_lines) > max(self.import_index, self._first_comment_index_end + 1, 1) - 1:
+
last = self.out_lines and self.out_lines[-1].rstrip() or ""
while (last.startswith("#") and not last.endswith('"""') and not last.endswith("'''") and
not 'isort:imports-' in last):
|
Implement a fix for issue #<I>
|
py
|
diff --git a/pghoard/transfer.py b/pghoard/transfer.py
index <HASH>..<HASH> 100644
--- a/pghoard/transfer.py
+++ b/pghoard/transfer.py
@@ -85,9 +85,11 @@ class TransferAgent(Thread):
self.set_state_defaults_for_site(site)
oper_size = file_to_transfer.get("file_size", 0)
if result["success"]:
+ filename = os.path.basename(file_to_transfer["local_path"])
self.state[site][oper][filetype]["count"] += 1
self.state[site][oper][filetype]["data"] += oper_size
self.state[site][oper][filetype]["time_taken"] += time.time() - start_time
+ self.state[site][oper][filetype]["latest_filename"] = filename
else:
self.state[site][oper][filetype]["failures"] += 1
|
transfer: Store the name of the latest file transferred This allows us to see which files of which type have been transferred last just by glancing at the state file.
|
py
|
diff --git a/testkitbackend/requests.py b/testkitbackend/requests.py
index <HASH>..<HASH> 100644
--- a/testkitbackend/requests.py
+++ b/testkitbackend/requests.py
@@ -65,6 +65,15 @@ def VerifyConnectivity(backend, data):
backend.send_response("Driver", {"id": driver_id})
+def CheckMultiDBSupport(backend, data):
+ driver_id = data["driverId"]
+ driver = backend.drivers[driver_id]
+ backend.send_response(
+ "MultiDBSupport",
+ {"id": backend.next_key(), "available": driver.supports_multi_db()}
+ )
+
+
def resolution_func(backend):
def resolve(address):
key = backend.next_key()
|
Add CheckMultiDBSupport support to testkit backend (#<I>) Add CheckMultiDBSupport request and MultiDBSupport response support to testkit backend
|
py
|
diff --git a/bugzilla/base.py b/bugzilla/base.py
index <HASH>..<HASH> 100644
--- a/bugzilla/base.py
+++ b/bugzilla/base.py
@@ -164,6 +164,10 @@ class Bugzilla(object):
[bugzilla.yoursite.com]
user = username
password = password
+ Or
+ [bugzilla.yoursite.com]
+ api_key = key
+
You can also use the [DEFAULT] section to set defaults that apply to
any site without a specific section of its own.
Be sure to set appropriate permissions on bugzillarc if you choose to
@@ -458,7 +462,10 @@ class Bugzilla(object):
return
for key, val in cfg.items(section):
- if key == "user":
+ if key == "api_key":
+ log.debug("bugzillarc: setting api_key")
+ self.api_key = val
+ elif key == "user":
log.debug("bugzillarc: setting user=%s", val)
self.user = val
elif key == "password":
|
support setting api_key in bugzillarc
|
py
|
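The new branch treats api_key exactly like the existing user/password keys when walking a per-site section of bugzillarc. A sketch of parsing such a file with the standard-library configparser (the section name and value are just the placeholders from the docstring above):

```python
import configparser

sample = """
[bugzilla.yoursite.com]
api_key = key
"""

cfg = configparser.ConfigParser()
cfg.read_string(sample)

api_key = user = password = None
for key, val in cfg.items("bugzilla.yoursite.com"):
    if key == "api_key":
        api_key = val        # new: pick up an API key if one is configured
    elif key == "user":
        user = val
    elif key == "password":
        password = val

print(api_key)  # 'key'
```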
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,8 @@
+# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
- name='djangorestframework',
+ name='drf-nested-fields',
version="0.9.0",
url='https://github.com/seebass/drf-nested-fields',
license='MIT',
|
FIXED: name in setup.py
|
py
|
diff --git a/tests/test_ext_marshmallow.py b/tests/test_ext_marshmallow.py
index <HASH>..<HASH> 100644
--- a/tests/test_ext_marshmallow.py
+++ b/tests/test_ext_marshmallow.py
@@ -598,4 +598,4 @@ class TestDictValues:
spec.definition('SchemaWithDict', schema=SchemaWithDict)
result = spec._definitions['SchemaWithDict']['properties']['dict_field']
assert 'additionalProperties' in result
- assert result['additionalProperties'] == 'string'
+ assert result['additionalProperties']['type'] == 'string'
|
Access 'type' property of 'additionalProperties'
|
py
|
diff --git a/croppie/widgets/__init__.py b/croppie/widgets/__init__.py
index <HASH>..<HASH> 100644
--- a/croppie/widgets/__init__.py
+++ b/croppie/widgets/__init__.py
@@ -1,7 +1,7 @@
import django
from django import forms
-if django.VERSION[1] < 11:
+if django.VERSION[0] <= 1 and django.VERSION[1] < 11:
from .widgets_old import CroppieWidget
else:
from .widgets import CroppieWidget
|
Don't render the Django <<I> widget on Django >=2. Only checking the minor version results in Django <I>, <I>, etc. rendering the widget from widgets_old, which is incorrect.
|
py
|
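The old check only looked at the minor version number, so Django 2.0 or 3.1 (minor component 0 or 1, both < 11) wrongly fell back to the legacy widget. Guarding on the major version first fixes that. A sketch with hard-coded version tuples, so no Django import is needed:

```python
def uses_old_widget_buggy(version):
    return version[1] < 11                      # Django 2.0 -> 0 < 11 -> True (wrong)

def uses_old_widget_fixed(version):
    return version[0] <= 1 and version[1] < 11  # only pre-1.11 releases qualify

for v in [(1, 8, 0), (1, 11, 0), (2, 0, 0), (3, 1, 0)]:
    print(v, uses_old_widget_buggy(v), uses_old_widget_fixed(v))
# (2, 0, 0) True False   <- the case the patch corrects
```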
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@ import imp
import versioneer
-requirements = ['pyzmq', 'six']
+requirements = ['pyzmq', 'six', 'netifaces']
if sys.version_info < (2, 6):
requirements.append('simplejson')
|
Add netifaces as a requirement
|
py
|
diff --git a/timepiece/models.py b/timepiece/models.py
index <HASH>..<HASH> 100644
--- a/timepiece/models.py
+++ b/timepiece/models.py
@@ -262,7 +262,7 @@ class Entry(models.Model):
end_is_inside = entry_a.end_time > entry_b.start_time \
and entry_a.end_time < entry_b.end_time
b_is_inside = entry_a.start_time < entry_b.start_time \
- and entry_a.end_time > entry_b.end_time:
+ and entry_a.end_time > entry_b.end_time
overlap = start_is_inside or end_is_inside or b_is_inside
return overlap
|
Stray colon left in last commit invalidates syntax
|
py
|
diff --git a/tests/query/test_collection.py b/tests/query/test_collection.py
index <HASH>..<HASH> 100644
--- a/tests/query/test_collection.py
+++ b/tests/query/test_collection.py
@@ -810,6 +810,8 @@ class TestCollectionQueryTool(unittest.TestCase):
return response
self.query_tool.url_base = "https://example.org"
+ self.query_tool.token = secrets.token_urlsafe(6)
+
request_mock.side_effect = mocking
expected = response_dict
@@ -1019,6 +1021,7 @@ class TestCollectionQueryTool(unittest.TestCase):
"""
self.availability_status_dataset["checker_type"] = "GIT"
+ self.query_tool.token = secrets.token_urlsafe(6)
self.assertRaises(
ValueError,
|
fixup! Integration of the collection.
|
py
|
diff --git a/pyontutils/scig.py b/pyontutils/scig.py
index <HASH>..<HASH> 100755
--- a/pyontutils/scig.py
+++ b/pyontutils/scig.py
@@ -60,7 +60,8 @@ class ImportChain: # TODO abstract this a bit to support other onts, move back
direction='OUTGOING'))
nodes = Async()(deferred(gin)(i) for i in iris)
imports = [(i, *[(e['obj'], 'owl:imports', e['sub'])
- for e in n['edges']])
+ for e in n['edges']
+ if not e['sub'].startswith('_:')])
for i, n in nodes if n]
self.itrips = sorted(set(tuple(rdflib.URIRef(OntId(e).iri) for e in t)
for i, *ts in imports if ts for t in ts))
|
scig ImportChain: fix issue with bnode imports. How such an import came to be I cannot imagine, but here we are
|
py
|
diff --git a/gems/__init__.py b/gems/__init__.py
index <HASH>..<HASH> 100644
--- a/gems/__init__.py
+++ b/gems/__init__.py
@@ -2,7 +2,7 @@
__author__ = 'Blake Printy'
__email__ = '[email protected]'
-__version__ = '0.1.0'
+__version__ = '0.1.1'
from .datatypes import composite
|
incremented patch version for api name change
|
py
|
diff --git a/BAC0/core/devices/Points.py b/BAC0/core/devices/Points.py
index <HASH>..<HASH> 100644
--- a/BAC0/core/devices/Points.py
+++ b/BAC0/core/devices/Points.py
@@ -450,7 +450,10 @@ class Point:
AnalogValue are written to
AnalogOutput are overridden
"""
- if "Value" in self.properties.type:
+ if 'characterstring' in self.properties.type:
+ self.write(value)
+
+ elif "Value" in self.properties.type:
if str(value).lower() == "auto":
raise ValueError(
"Value was not simulated or overridden, cannot release to auto"
@@ -946,6 +949,15 @@ class StringPoint(Point):
"""
return None
+ def _trend(self, res):
+ super()._trend(res)
+
+ @property
+ def value(self):
+ res = super().value
+ self._trend(res)
+ return res
+
def _set(self, value):
try:
if isinstance(value, str):
|
Fix for writing to characterstring... was impossible to write auto :0)
|
py
|
diff --git a/hypermap/dynasty/utils.py b/hypermap/dynasty/utils.py
index <HASH>..<HASH> 100644
--- a/hypermap/dynasty/utils.py
+++ b/hypermap/dynasty/utils.py
@@ -25,6 +25,8 @@ def clean_text(text):
text = text.decode("utf-8")
except UnicodeEncodeError:
text = text.encode("ascii", "ignore")
+ except:
+ pass
return text
|
Changes to the exception handling by adding an extra except clause
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -39,16 +39,6 @@ from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.command.clean import (clean, log, remove_tree)
-# check LAL
-try:
- import lal
-except ImportError as e:
- e.args = ('%s. LAL is required by GWpy, please install before '
- 'continuing, see '
- 'https://gwpy.github.io/docs/stable/install/lal.html '
- 'for details' % str(e),)
- raise
-
# set basic metadata
PACKAGENAME = 'gwpy'
AUTHOR = 'Duncan Macleod'
@@ -89,6 +79,12 @@ extras_require = {
extras_require['all'] = set(p for extra in extras_require.values()
for p in extra)
+# test for LAL
+try:
+ import lal
+except ImportError as e:
+ install_requires.append('ligotimegps')
+
# test for OrderedDict
try:
from collections import OrderedDict
|
setup: don't require LAL at install time; install ligotimegps instead
|
py
|
diff --git a/cr8/fake_providers.py b/cr8/fake_providers.py
index <HASH>..<HASH> 100644
--- a/cr8/fake_providers.py
+++ b/cr8/fake_providers.py
@@ -3,30 +3,19 @@
import math
from faker.providers import BaseProvider
-from multiprocessing import Manager
EARTH_RADIUS = 6371 # earth radius in km
-# Not implemented as Provider because with providers it's not possible to have
-# 1 instance per column. So either there would be one shared Counter accross
-# multiple auto_inc columns or there could be duplicate values within one column
+def auto_inc(fake, col):
+ x = 0
-class Counter:
- def __init__(self, value, lock):
- self.value = value
- self.lock = lock
-
- def __call__(self):
- val = self.value
- with self.lock:
- val.value += 1
- return val.value
-
+ def inc():
+ nonlocal x
+ x = x + 1
+ return x
-def auto_inc(fake, col):
- manager = Manager()
- return Counter(manager.Value('i', 0), manager.Lock())
+ return inc
def _dest_point(point, distance, bearing, radius):
|
Implement auto_inc without a multiprocessing Manager. The ingest pipeline is no longer using multiple processes, so synchronization is no longer needed
|
py
|
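With the ingest pipeline running in a single process, a closure over a nonlocal counter is enough; the Manager-backed Counter and its lock are dropped. A sketch of how the returned callable behaves (the fake/col parameters from the real signature are omitted here):

```python
def auto_inc():
    """Return a callable that yields 1, 2, 3, ... without any locking."""
    x = 0

    def inc():
        nonlocal x
        x += 1
        return x

    return inc

next_id = auto_inc()
print(next_id(), next_id(), next_id())  # 1 2 3

other = auto_inc()  # each column gets its own independent counter
print(other())      # 1
```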
diff --git a/pyhomematic/devicetypes/actors.py b/pyhomematic/devicetypes/actors.py
index <HASH>..<HASH> 100644
--- a/pyhomematic/devicetypes/actors.py
+++ b/pyhomematic/devicetypes/actors.py
@@ -401,7 +401,7 @@ class IPSwitchPowermeter(IPSwitch, HMSensor):
"ENERGY_COUNTER": [sensorIndex]})
-class IPKeySwitchPowermeter(IPSwitchPowermeter):
+class IPKeySwitchPowermeter(IPSwitchPowermeter, HMEvent, HelperEventRemote, HelperActionPress):
"""
Switch turning plugged in device on or off and measuring energy consumption.
"""
|
adding missing super classes for IPKeySwitchPowermeter
|
py
|
diff --git a/salt/modules/win_servermanager.py b/salt/modules/win_servermanager.py
index <HASH>..<HASH> 100644
--- a/salt/modules/win_servermanager.py
+++ b/salt/modules/win_servermanager.py
@@ -47,6 +47,9 @@ def _check_server_manager():
Returns: True if import is successful, otherwise returns False
'''
+ if 'Server' not in __grains__['osrelease']:
+ return False
+
return not __salt__['cmd.retcode']('Import-Module ServerManager',
shell='powershell',
python_shell=True)
|
Check for a Server OS before checking for the ServerManager module
|
py
|
diff --git a/tool_reset_account.py b/tool_reset_account.py
index <HASH>..<HASH> 100644
--- a/tool_reset_account.py
+++ b/tool_reset_account.py
@@ -13,7 +13,7 @@ import concurrent.futures
def purge_blob_containers(account, account_key):
"""
Delete all blob containers in the given storage account.
- USE AT OWN RISK.
+ USE AT OWN RISK. NOT SUPPORTED BY STORAGE TEAM.
"""
bs = BlockBlobService(account, account_key)
|
Updated disclaimer for tool_reset_account
|
py
|
diff --git a/keepalive/keepalive.py b/keepalive/keepalive.py
index <HASH>..<HASH> 100644
--- a/keepalive/keepalive.py
+++ b/keepalive/keepalive.py
@@ -312,7 +312,10 @@ class KeepAliveHandler:
if not req.headers.has_key('Content-length'):
h.putheader('Content-length', '%d' % len(data))
else:
- h.putrequest('GET', req.selector)
+ if hasattr(req, 'selector'):
+ h.putrequest('GET', req.selector)
+ else:
+ h.putrequest('GET', req.get_selector())
except (socket.error, httplib.HTTPException), err:
raise urllib2.URLError(err)
|
improved patch from <I>f<I>ef<I>b5fe<I>c9efbf4fed7da<I>e to address the issue identified by @xflr6 in RDFLib/sparqlwrapper#<I>
|
py
|
diff --git a/py/h2o.py b/py/h2o.py
index <HASH>..<HASH> 100644
--- a/py/h2o.py
+++ b/py/h2o.py
@@ -1256,10 +1256,10 @@ class H2O(object):
'-hdfs hdfs://' + self.hdfs_name_node,
'-hdfs_version=' + self.hdfs_version,
]
- if self.hdfs_config:
- args += [
- '-hdfs_config ' + self.hdfs_config
- ]
+ if self.hdfs_config:
+ args += [
+ '-hdfs_config ' + self.hdfs_config
+ ]
if not self.sigar:
args += ['--nosigar']
|
Minor change to have fine-grained control over HDFS options
|
py
|
diff --git a/fireplace/game.py b/fireplace/game.py
index <HASH>..<HASH> 100644
--- a/fireplace/game.py
+++ b/fireplace/game.py
@@ -115,6 +115,7 @@ class BaseGame(Entity):
result = self.action_block(source, actions, type, target=target)
if self.state != State.COMPLETE:
self.manager.step(Step.MAIN_ACTION, Step.MAIN_END)
+ return result
def joust(self, source, challenger, defender, actions):
type = BlockType.JOUST
@@ -139,7 +140,7 @@ class BaseGame(Entity):
actions = []
if cards:
- self.action_start(type, self, -1, None)
+ self.action_start(type, self, 0, None)
for card in cards:
card.zone = Zone.GRAVEYARD
actions.append(Death(card))
|
DEATHS block should always have an index of 0
|
py
|
diff --git a/tensorflow_datasets/core/features/text/text_encoder.py b/tensorflow_datasets/core/features/text/text_encoder.py
index <HASH>..<HASH> 100644
--- a/tensorflow_datasets/core/features/text/text_encoder.py
+++ b/tensorflow_datasets/core/features/text/text_encoder.py
@@ -330,16 +330,16 @@ class TokenTextEncoder(TextEncoder):
}
if self._user_defined_tokenizer is not None:
self._tokenizer.save_to_file(filename)
- kwargs["tokenizer_file_prefix"] = filename
+ kwargs["has_tokenizer"] = True
self._write_lines_to_file(filename, self._vocab_list, kwargs)
@classmethod
def load_from_file(cls, filename_prefix):
filename = cls._filename(filename_prefix)
vocab_lines, kwargs = cls._read_lines_from_file(filename)
- tokenizer_file = kwargs.pop("tokenizer_file_prefix", None)
- if tokenizer_file:
- kwargs["tokenizer"] = Tokenizer.load_from_file(tokenizer_file)
+ has_tokenizer = kwargs.pop("has_tokenizer", False)
+ if has_tokenizer:
+ kwargs["tokenizer"] = Tokenizer.load_from_file(filename)
return cls(vocab_list=vocab_lines, **kwargs)
|
Fix bug involving temporary filepaths when loading a Tokenizer of a TokenTextEncoder. Because the data directory is marked as "incomplete" while the dataset is being built, and later renamed, absolute filepaths written while the dataset is being built are wrong. This change eliminates the absolute filepaths and instead assumes a constant relative structure between the metadata files. PiperOrigin-RevId: <I>
|
py
|
diff --git a/baron/inner_formatting_grouper.py b/baron/inner_formatting_grouper.py
index <HASH>..<HASH> 100644
--- a/baron/inner_formatting_grouper.py
+++ b/baron/inner_formatting_grouper.py
@@ -73,7 +73,7 @@ GROUP_ON = (
"AT",
"IF",
"ELSE",
- "FROM"
+ "FROM",
"EQUAL",
"PLUS_EQUAL",
"MINUS_EQUAL",
|
Add missing comma in Python string list In Python, two adjacent strings get concatenated implicitly. Missing commas in multi-line string lists is a common source of bugs causing unwanted string concatenation. In this case, it is clear that this comma is missing by mistake and there should not be a concatenation.
|
py
|
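The commit message spells out the mechanism: adjacent string literals are concatenated at compile time, so a missing comma silently fuses two list entries into one. A tiny demonstration of the effect the patch fixes:

```python
GROUP_ON = (
    "IF",
    "ELSE",
    "FROM"       # missing comma: the next literal is glued onto this one
    "EQUAL",
    "PLUS_EQUAL",
)

print(len(GROUP_ON))            # 4, not 5
print("FROMEQUAL" in GROUP_ON)  # True
print("FROM" in GROUP_ON)       # False -- neither "FROM" nor "EQUAL" exists on its own
```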
diff --git a/ipyrad/assemble/cluster_across.py b/ipyrad/assemble/cluster_across.py
index <HASH>..<HASH> 100644
--- a/ipyrad/assemble/cluster_across.py
+++ b/ipyrad/assemble/cluster_across.py
@@ -330,6 +330,10 @@ def build_catg_file(data, samples):
for handle in h5handles:
os.remove(handle)
+ for sample in samples:
+ sample.stats.state = 6
+ ## save stats to data
+ data._stamp("s6 clustered across "+sample.name)
def singlecat(data, sample):
|
Added update to set sample.stats.state to 6 upon completion of clustering
|
py
|
diff --git a/mathematica/lexer.py b/mathematica/lexer.py
index <HASH>..<HASH> 100644
--- a/mathematica/lexer.py
+++ b/mathematica/lexer.py
@@ -12,7 +12,7 @@ import mathematica.builtins as mma
class Regex:
IDENTIFIER = r'[a-zA-Z\$][a-zA-Z0-9\$]*'
- NAMED_CHARACTER = r'\\[{identifier}]'.format(identifier=IDENTIFIER)
+ NAMED_CHARACTER = r'\\\[{identifier}]'.format(identifier=IDENTIFIER)
SYMBOLS = (r'[`]?({identifier}|{named_character})(`({identifier}|{named_character}))*[`]?'
.format(identifier=IDENTIFIER, named_character=NAMED_CHARACTER))
INTEGER = r'[0-9]+'
|
Remove FutureWarning: Possible nested set at ... (#9) Fixes #8
|
py
|
diff --git a/grammpy/Grammars/RawGrammar.py b/grammpy/Grammars/RawGrammar.py
index <HASH>..<HASH> 100644
--- a/grammpy/Grammars/RawGrammar.py
+++ b/grammpy/Grammars/RawGrammar.py
@@ -10,7 +10,7 @@ import inspect
from ..Terminal import Terminal
from ..Nonterminal import Nonterminal
from ..HashContainer import HashContainer
-from ..exceptions import NotNonterminalException, NotRuleException
+from ..exceptions import NotNonterminalException, NotRuleException, TerminalDoesNotExistsException, NonterminalDoesNotExistsException
from ..IsMethodsRuleExtension import IsMethodsRuleExtension
@@ -117,8 +117,11 @@ class RawGrammar:
return self.__rules.remove(rules)
def have_rule(self, rules):
- rules = self._control_rules(rules)
- return self.__rules.have(rules)
+ try:
+ rules = self._control_rules(rules)
+ return self.__rules.have(rules)
+ except (TerminalDoesNotExistsException, NonterminalDoesNotExistsException):
+ return False
def get_rule(self, rules=None):
if rules is None:
|
Grammar.have_rule now returns false (instead of raising an exception) when the grammar doesn't have a terminal/nonterminal that the rule uses
|
py
|
diff --git a/horizon/dashboards/nova/images_and_snapshots/images/forms.py b/horizon/dashboards/nova/images_and_snapshots/images/forms.py
index <HASH>..<HASH> 100644
--- a/horizon/dashboards/nova/images_and_snapshots/images/forms.py
+++ b/horizon/dashboards/nova/images_and_snapshots/images/forms.py
@@ -41,10 +41,12 @@ class UpdateImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length="25", label=_("Name"))
kernel = forms.CharField(max_length="36", label=_("Kernel ID"),
+ required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}
))
ramdisk = forms.CharField(max_length="36", label=_("Ramdisk ID"),
+ required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}
))
|
set kernel and ramdisk id as non-required fields, since they're not editable * fix bug <I> Change-Id: I<I>c0b<I>b5d7ce<I>b1c1e4e<I>b<I>bab<I>
|
py
|
diff --git a/wcmatch/glob.py b/wcmatch/glob.py
index <HASH>..<HASH> 100644
--- a/wcmatch/glob.py
+++ b/wcmatch/glob.py
@@ -503,9 +503,11 @@ class PurePath(pathlib.PurePath):
def _translate_for_glob(self, patterns, flags):
"""Translate for glob."""
+ if not all([isinstance(p, str) for p in ([patterns] if isinstance(patterns, (str, bytes)) else patterns)]):
+ raise ValueError("Expected a pattern of type 'str', but received 'bytes' instead")
+
sep = ''
flags |= _wcparse.GLOBSTAR
- is_bytes = isinstance(([patterns] if isinstance(patterns, (str, bytes)) else patterns)[0], bytes)
if isinstance(self, PureWindowsPath):
if flags & _wcparse.FORCEUNIX:
raise ValueError("Windows pathlike objects cannot be forced to behave like a Posix path")
@@ -516,10 +518,8 @@ class PurePath(pathlib.PurePath):
flags |= _wcparse.FORCEUNIX
if isinstance(self, Path) and self.is_dir():
sep = self._flavour.sep
- if is_bytes:
- sep = os.fsencode(sep)
- return bytes(self) + sep if is_bytes else str(self) + sep, flags
+ return str(self) + sep, flags
def match(self, patterns, *, flags=0):
"""Match the pattern."""
|
Bytes patterns are not allowed in pathlib globbing and matching This mirrors how pathlib already operates.
|
py
|
diff --git a/src/feat/models/applicationjson.py b/src/feat/models/applicationjson.py
index <HASH>..<HASH> 100644
--- a/src/feat/models/applicationjson.py
+++ b/src/feat/models/applicationjson.py
@@ -107,6 +107,25 @@ def render_value(value):
result["label"] = value.label
if value.desc is not None:
result["desc"] = value.desc
+ metadata = render_metadata(value)
+ if metadata:
+ result["metadata"] = metadata
+ if IValueCollection.providedBy(value):
+ coll = IValueCollection(value)
+ result["allowed"] = [render_value(v) for v in coll.allowed_types]
+ result["ordered"] = coll.is_ordered
+ result["multiple"] = coll.allow_multiple
+ if IValueRange.providedBy(value):
+ vrange = IValueRange(value)
+ result["minimum"] = vrange.minimum
+ result["maximum"] = vrange.maximum
+ if vrange.increment is not None:
+ result["increment"] = vrange.increment
+ if IValueOptions.providedBy(value):
+ options = IValueOptions(value)
+ result["restricted"] = options.is_restricted
+ result["options"] = [{"label": o.label, "value": o.value}
+ for o in options.iter_options()]
return result
|
Adds more value information rendering to json writer.
|
py
|
diff --git a/troposphere/appmesh.py b/troposphere/appmesh.py
index <HASH>..<HASH> 100644
--- a/troposphere/appmesh.py
+++ b/troposphere/appmesh.py
@@ -3,7 +3,7 @@
#
# See LICENSE file for full license.
-from . import AWSObject, AWSProperty
+from . import AWSObject, AWSProperty, Tags
from .validators import integer
@@ -215,3 +215,26 @@ class Mesh(AWSObject):
'Spec': (MeshSpec, False),
'Tags': ([TagRef], False),
}
+
+
+class VirtualRouterListener(AWSProperty):
+ props = {
+ 'PortMapping': (PortMapping, True),
+ }
+
+
+class VirtualRouterSpec(AWSProperty):
+ props = {
+ 'Listeners': ([VirtualRouterListener], True),
+ }
+
+
+class VirtualRouter(AWSObject):
+ resource_type = "AWS::AppMesh::VirtualRouter"
+
+ props = {
+ 'MeshName': (basestring, True),
+ 'Spec': (VirtualRouterSpec, True),
+ 'Tags': (Tags, False),
+ 'VirtualRouterName': (basestring, True),
+ }
|
Add AppMesh::VirtualRouter (#<I>)
|
py
|
diff --git a/gwpy/segments/io/hdf5.py b/gwpy/segments/io/hdf5.py
index <HASH>..<HASH> 100644
--- a/gwpy/segments/io/hdf5.py
+++ b/gwpy/segments/io/hdf5.py
@@ -25,6 +25,7 @@ cleaner.
import h5py
import numpy
+from distutils.version import LooseVersion
from astropy.io.registry import (register_reader, register_writer,
register_identifier)
@@ -162,7 +163,10 @@ def segmentlist_from_hdf5(f, name=None, gpstype=LIGOTimeGPS):
else:
dataset = h5file[name]
- data = dataset[()]
+ try:
+ data = dataset[()]
+ except ValueError:
+ data = []
out = SegmentList()
for row in data:
@@ -214,7 +218,10 @@ def segmentlist_to_hdf5(seglist, output, name, group=None,
start, end = map(LIGOTimeGPS, seg)
data[i, :] = (start.seconds, start.nanoseconds,
end.seconds, end.nanoseconds)
-
+ if (not len(seglist) and
+ LooseVersion(h5py.version.version).version[0] < 2):
+ kwargs.setdefault('maxshape', (None, 4))
+ kwargs.setdefault('chunks', (1, 1))
dset = h5group.create_dataset(name, data=data,
compression=compression, **kwargs)
finally:
|
segments.io.hdf5: fix version issue with empty segment lists
|
py
|
diff --git a/cherrypy/_cpchecker.py b/cherrypy/_cpchecker.py
index <HASH>..<HASH> 100644
--- a/cherrypy/_cpchecker.py
+++ b/cherrypy/_cpchecker.py
@@ -46,6 +46,20 @@ class Checker(object):
# This value should be set inside _cpconfig.
global_config_contained_paths = False
+ def check_app_config_entries_dont_start_with_script_name(self):
+ for sn, app in cherrypy.tree.apps.iteritems():
+ if not isinstance(app, cherrypy.Application):
+ continue
+ if not app.config:
+ continue
+ if sn == '':
+ continue
+ for key in app.config.keys():
+ if key.startswith(sn):
+ warnings.warn(
+ "The application mounted at %r has config " \
+ "entries that start with its script name: %r" % (sn, key))
+
def check_skipped_app_config(self):
for sn, app in cherrypy.tree.apps.iteritems():
if not isinstance(app, cherrypy.Application):
|
#<I> - cpchecker method to catch application configuration that starts with script-name
|
py
|
diff --git a/openquake/baselib/general.py b/openquake/baselib/general.py
index <HASH>..<HASH> 100644
--- a/openquake/baselib/general.py
+++ b/openquake/baselib/general.py
@@ -399,8 +399,7 @@ def assert_independent(package, *packages):
>>> assert_independent('openquake.hazardlib',
... 'openquake.risklib', 'openquake.commonlib')
- >>> assert_independent('openquake.risklib',
- ... 'openquake.hazardlib', 'openquake.commonlib')
+ >>> assert_independent('openquake.risklib', 'openquake.commonlib')
>>> assert_independent('openquake.risklib.tests', 'openquake.risklib')
Traceback (most recent call last):
...
|
Removed a failing doctest
|
py
|
diff --git a/vncdotool/api.py b/vncdotool/api.py
index <HASH>..<HASH> 100644
--- a/vncdotool/api.py
+++ b/vncdotool/api.py
@@ -36,9 +36,8 @@ def connect(server, password=None):
in the main thread of non-Twisted Python Applications, EXPERIMENTAL.
>>> from vncdotool import api
- >>> client = api.connect('host')
- >>> client.keyPress('c')
- >>> api.shutdown()
+ >>> with api.connect('host') as client
+ >>> client.keyPress('c')
You may then call any regular VNCDoToolClient method on client from your
application code.
@@ -83,6 +82,12 @@ class ThreadedVNCClientProxy(object):
self.queue = queue.Queue()
self._timeout = 60 * 60
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *_):
+ self.disconnect()
+
@property
def timeout(self):
"""Timeout in seconds for API requests."""
|
issue #<I>: add context manager protocol to client
|
py
|
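The patch adds __enter__/__exit__ so a client obtained from api.connect() can be used in a with block and is disconnected automatically, even if the body raises. A minimal sketch of the same protocol with a stand-in class (not the real vncdotool proxy):

```python
class Client:
    def connect(self):
        print("connected")
        return self

    def keyPress(self, key):
        print("pressed", key)

    def disconnect(self):
        print("disconnected")

    # context manager protocol
    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.disconnect()   # runs on normal exit and on exceptions

with Client().connect() as client:
    client.keyPress('c')
# connected / pressed c / disconnected
```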
diff --git a/test/__init__.py b/test/__init__.py
index <HASH>..<HASH> 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -605,7 +605,7 @@ class ClientContext(object):
func=func)
def supports_transactions(self):
- if self.version.at_least(4, 1, 6):
+ if self.version.at_least(4, 1, 8):
return self.is_mongos or self.is_rs
if self.version.at_least(4, 0):
|
PYTHON-<I> Bump required server version for testing sharded transactions
|
py
|
diff --git a/host/basil/utils/sim/utils.py b/host/basil/utils/sim/utils.py
index <HASH>..<HASH> 100644
--- a/host/basil/utils/sim/utils.py
+++ b/host/basil/utils/sim/utils.py
@@ -7,6 +7,7 @@
import subprocess
import time
+import os
def cocotb_makefile(sim_files, top_level = 'tb', test_module='basil.utils.sim.Test' ,sim_host='localhost', sim_port=12345, sim_bus='basil.utils.sim.BasilBusDriver',
end_on_disconnect=True, include_dirs=['../../../device/modules', '../../../device/modules/includes'] ):
@@ -41,13 +42,14 @@ def cocotb_compile_and_run(verilog_sources):
file = open('Makefile','w')
file.write(cocotb_makefile(verilog_sources, top_level='none'))
file.close()
- subprocess.call("make", shell=True)
+ FNULL = open(os.devnull, 'w')
+ subprocess.call("make", shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
#run simulator in background
file = open('Makefile','w')
file.write(cocotb_makefile(verilog_sources))
file.close()
subprocess.Popen(['make'])
- time.sleep(3)
+ time.sleep(2)
\ No newline at end of file
|
ENH: Hide fake errors during verilog build
|
py
|
diff --git a/vaping/plugins/zeromq.py b/vaping/plugins/zeromq.py
index <HASH>..<HASH> 100644
--- a/vaping/plugins/zeromq.py
+++ b/vaping/plugins/zeromq.py
@@ -1,6 +1,9 @@
from builtins import str
-import zmq.green as zmq
+try:
+ import zmq.green as zmq
+except ImportError:
+ zmq = None
import vaping
import vaping.plugins
@@ -11,6 +14,11 @@ class ZeroMQ(vaping.plugins.EmitBase):
def init(self):
self.log.debug("init zeromq ..")
+ if not zmq:
+ self.log.error("missing zeromq, please install pyzmq to use this plugin")
+ self.skip = True
+ return
+
self.skip = False
self.ctx = zmq.Context()
|
fixes #1 detect if pyzmq is installed
|
py
|
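The plugin now degrades gracefully when pyzmq is not installed: the import is wrapped in try/except and init() flags the plugin as skipped instead of failing at import time. A generic sketch of this optional-dependency pattern (the class here is a simplified stand-in for the vaping plugin):

```python
import logging

log = logging.getLogger(__name__)

try:
    import zmq.green as zmq   # optional dependency
except ImportError:
    zmq = None

class ZeroMQEmitter:
    def init(self):
        if not zmq:
            log.error("missing zeromq, please install pyzmq to use this plugin")
            self.skip = True
            return
        self.skip = False
        self.ctx = zmq.Context()

emitter = ZeroMQEmitter()
emitter.init()       # either sets up a zmq context or marks the plugin as skipped
print(emitter.skip)
```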
diff --git a/torchvision/datasets/folder.py b/torchvision/datasets/folder.py
index <HASH>..<HASH> 100644
--- a/torchvision/datasets/folder.py
+++ b/torchvision/datasets/folder.py
@@ -151,7 +151,7 @@ class DatasetFolder(data.Dataset):
return fmt_str
-IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
+IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff']
def pil_loader(path):
|
Add support for .TIFF files in ImageFolder (#<I>)
|
py
|
diff --git a/salt/returners/slack_returner.py b/salt/returners/slack_returner.py
index <HASH>..<HASH> 100644
--- a/salt/returners/slack_returner.py
+++ b/salt/returners/slack_returner.py
@@ -4,7 +4,7 @@ Return salt data via slack
.. versionadded:: 2015.5.0
-The following fields can be set in the minion conf file::
+The following fields can be set in the minion conf file:
.. code-block:: yaml
|
Remove an extra colon that is causing rendering issues
|
py
|
diff --git a/zstd_cffi.py b/zstd_cffi.py
index <HASH>..<HASH> 100644
--- a/zstd_cffi.py
+++ b/zstd_cffi.py
@@ -429,9 +429,8 @@ class ZstdCompressor(object):
if not data:
break
- total_read += len(data)
-
data_buffer = ffi.from_buffer(data)
+ total_read += len(data_buffer)
in_buffer.src = data_buffer
in_buffer.size = len(data_buffer)
in_buffer.pos = 0
|
cffi: calculate read byte count from buffer size This is more accurate than asking the object for its length.
|
py
|
diff --git a/python_modules/dagster/dagster/core/storage/pipeline_run.py b/python_modules/dagster/dagster/core/storage/pipeline_run.py
index <HASH>..<HASH> 100644
--- a/python_modules/dagster/dagster/core/storage/pipeline_run.py
+++ b/python_modules/dagster/dagster/core/storage/pipeline_run.py
@@ -319,10 +319,13 @@ class PipelineRun(
"external_pipeline_origin is required for queued runs",
)
+ if run_id is None:
+ run_id = make_new_run_id()
+
return super(PipelineRun, cls).__new__(
cls,
pipeline_name=check.opt_str_param(pipeline_name, "pipeline_name"),
- run_id=check.opt_str_param(run_id, "run_id", default=make_new_run_id()),
+ run_id=check.str_param(run_id, "run_id"),
run_config=check.opt_dict_param(run_config, "run_config", key_type=str),
mode=check.opt_str_param(mode, "mode"),
solid_selection=solid_selection,
|
[PipelineRun] only make new run id when needed (#<I>)
|
py
|
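The reason for the restructuring: a default= argument is evaluated at the call site, so make_new_run_id() ran on every construction even when a run_id was supplied. Generating the id only when run_id is None avoids the wasted work. A generic sketch of the pattern (opt_str_param here is a stand-in, not dagster's actual check helper):

```python
import uuid

calls = 0

def make_new_run_id():
    global calls
    calls += 1
    return str(uuid.uuid4())

def opt_str_param(value, name, default=None):
    # stand-in helper: fall back to the default when no value is given
    return value if value is not None else default

# eager: the default expression runs even though an id was provided
run_id = opt_str_param("existing-id", "run_id", default=make_new_run_id())
print(run_id, calls)   # existing-id 1  -- a fresh id was generated and thrown away

# lazy: only generate a new id when one is actually needed
provided = "existing-id"
run_id = provided if provided is not None else make_new_run_id()
print(run_id, calls)   # existing-id 1  -- no extra call
```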
diff --git a/tensor2tensor/rl/evaluator.py b/tensor2tensor/rl/evaluator.py
index <HASH>..<HASH> 100644
--- a/tensor2tensor/rl/evaluator.py
+++ b/tensor2tensor/rl/evaluator.py
@@ -132,7 +132,7 @@ def planner_guess2():
planning_horizon=8,
rollout_agent_type="policy",
env_type="simulated",
- uct_const=0.2,
+ uct_const=3.,
uniform_first_action=True,
uct_std_normalization=True,
)
@@ -146,7 +146,7 @@ def planner_guess3():
planning_horizon=8,
rollout_agent_type="policy",
env_type="simulated",
- uct_const=0.5,
+ uct_const=2.,
uniform_first_action=False,
uct_std_normalization=False,
)
|
Increase uct_const for Planner. (#<I>)
|
py
|
diff --git a/nece/managers.py b/nece/managers.py
index <HASH>..<HASH> 100644
--- a/nece/managers.py
+++ b/nece/managers.py
@@ -48,8 +48,8 @@ class TranslationQuerySet(models.QuerySet, TranslationMixin):
def iterator(self):
for obj in super(TranslationQuerySet, self).iterator():
- if self.queryset._language_code:
- obj.language(self.queryset._language_code)
+ if self._language_code:
+ obj.language(self._language_code)
yield obj
|
fix a thing for backwards compatibility
|
py
|
diff --git a/tests/test_tokenization_pegasus.py b/tests/test_tokenization_pegasus.py
index <HASH>..<HASH> 100644
--- a/tests/test_tokenization_pegasus.py
+++ b/tests/test_tokenization_pegasus.py
@@ -57,7 +57,7 @@ class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
@require_torch
def test_pegasus_large_seq2seq_truncation(self):
- src_texts = ["This is going to be way too long" * 10000, "short example"]
+ src_texts = ["This is going to be way too long." * 150, "short example"]
tgt_texts = ["not super long but more than 5 tokens", "tiny"]
batch = self.pegasus_large_tokenizer.prepare_seq2seq_batch(src_texts, tgt_texts=tgt_texts, max_target_length=5)
assert batch.input_ids.shape == (2, 1024)
|
Faster pegasus tokenization test with reduced data size (#<I>)
|
py
|
diff --git a/thunder/images/images.py b/thunder/images/images.py
index <HASH>..<HASH> 100644
--- a/thunder/images/images.py
+++ b/thunder/images/images.py
@@ -551,7 +551,7 @@ class Images(Data):
dtype : str, optional, default = None
dtype of one-dimensional ndarray resulting from application of func.
- if not supplied it will be automatically inferred for an extra computational cost.
+ If not supplied it will be automatically inferred for an extra computational cost.
block_size : str, or tuple of block size per dimension, optional, default = '150'
String interpreted as memory size (in megabytes e.g. '64'). Tuple of
|
fixed capitalization in docstring for images.map_as_series
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,7 @@ setup(name='mockredispy',
'nose'
],
install_requires=[
+ 'lunatic-python-bugfix==1.1'
],
tests_require=[
'redis>=2.7.2'
|
added install_requires on lunatic-python-bugfix-<I> bug:SS-<I>
|
py
|
diff --git a/wordcloud/wordcloud.py b/wordcloud/wordcloud.py
index <HASH>..<HASH> 100644
--- a/wordcloud/wordcloud.py
+++ b/wordcloud/wordcloud.py
@@ -119,7 +119,7 @@ class WordCloud(object):
max_font_size = height
self.max_font_size = max_font_size
- def _fit_words(self, words):
+ def fit_words(self, words):
"""Generate the positions for words.
Parameters
@@ -232,7 +232,7 @@ class WordCloud(object):
self.layout_ = zip(words, font_sizes, positions, orientations, colors)
return self.layout_
- def _process_text(self, text):
+ def process_text(self, text):
"""Splits a long text into words, eliminates the stopwords.
Parameters
@@ -301,14 +301,14 @@ class WordCloud(object):
def generate(self, text):
"""Generate wordcloud from text.
- Calls _process_text and _fit_words.
+ Calls process_text and fit_words.
Returns
-------
self
"""
- self._process_text(text)
- self._fit_words(self.words_)
+ self.process_text(text)
+ self.fit_words(self.words_)
return self
def _check_generated(self):
|
make process_text and fit_words public again
|
py
|
diff --git a/geoviews/plotting/bokeh/__init__.py b/geoviews/plotting/bokeh/__init__.py
index <HASH>..<HASH> 100644
--- a/geoviews/plotting/bokeh/__init__.py
+++ b/geoviews/plotting/bokeh/__init__.py
@@ -88,7 +88,7 @@ class TilePlot(GeoPlot):
"Element, rendering skipped.")
return {}, {'tile_source': tile_sources[0]}
- def _update_glyphs(self, renderer, properties, mapping, glyph):
+ def _update_glyph(self, renderer, properties, mapping, glyph):
allowed_properties = glyph.properties()
merged = dict(properties, **mapping)
glyph.update(**{k: v for k, v in merged.items()
|
Small compatibility fix for holoviews <I>
|
py
|
diff --git a/_pytest/compat.py b/_pytest/compat.py
index <HASH>..<HASH> 100644
--- a/_pytest/compat.py
+++ b/_pytest/compat.py
@@ -145,7 +145,7 @@ if _PY3:
else:
STRING_TYPES = bytes, str, unicode
- from itertools import imap
+ from itertools import imap # NOQA
def _escape_strings(val):
"""In py2 bytes and str are the same type, so return if it's a bytes
|
mark: fix introduced linting error
|
py
|
diff --git a/indra/assemblers/indranet/net.py b/indra/assemblers/indranet/net.py
index <HASH>..<HASH> 100644
--- a/indra/assemblers/indranet/net.py
+++ b/indra/assemblers/indranet/net.py
@@ -319,7 +319,17 @@ def _simple_scorer_update(G, edge):
s = k
for _ in range(v):
evidence_list.append(Evidence(source_api=s))
- return simple_scorer.score_statement(st=Statement(evidence=evidence_list))
+
+ try:
+ ag_belief = simple_scorer.score_statement(st=Statement(evidence=evidence_list))
+ # Catch underflow
+ except FloatingPointError as err:
+ # Numpy precision
+ NP_PRECISION = 10 ** -np.finfo(np.longfloat).precision
+ logger.warning('%s: Resetting ag_belief to 10*np.longfloat precision '
+ '(%.0e)' % (err, Decimal(NP_PRECISION * 10)))
+ ag_belief = NP_PRECISION * 10
+ return ag_belief
def _complementary_belief(G, edge):
|
Catch underflow error in simple_scorer Uses the same logic as in complementary_belief
|
py
|
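numpy only raises FloatingPointError on underflow when its error state is set to 'raise'; the patch catches that case and substitutes a small floor derived from long-double precision instead of letting the scorer crash. A hedged sketch of the same pattern (np.longdouble stands in for np.longfloat, and the errstate call is added here so the example is self-contained):

```python
import numpy as np

# floor derived from long-double precision, mirroring the patch
NP_PRECISION = 10 ** -np.finfo(np.longdouble).precision

with np.errstate(under='raise'):    # make numpy raise instead of flushing to zero
    try:
        ag_belief = (np.array([1e-300]) * np.array([1e-300]))[0]  # underflows
    except FloatingPointError as err:
        print("caught:", err)
        ag_belief = NP_PRECISION * 10   # reset to ten times the representable precision

print(ag_belief)
```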
diff --git a/numina/user/helpers.py b/numina/user/helpers.py
index <HASH>..<HASH> 100644
--- a/numina/user/helpers.py
+++ b/numina/user/helpers.py
@@ -270,6 +270,7 @@ class BaseWorkEnvironment(object):
os.path.join(self.datadir, value.filename)
)
+ dest = os.path.join(self.workdir, value.filename)
install_if_needed(value.filename, complete, self.workdir)
def copyfiles_stage1(self, obsres):
|
Pass full path to link_if_needed
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -48,5 +48,6 @@ setup(
packages = [
"pgpy",
+ "pgpy.packet",
],
)
\ No newline at end of file
|
added missing package: pgpy.packet
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
from setuptools import setup
-with open('README.md') as readme_file:
+with open('README.rst') as readme_file:
long_description = readme_file.read()
|
Fixed incorrect README file being specified in setup.py
|
py
|
diff --git a/src/scs_core/data/linear_regression.py b/src/scs_core/data/linear_regression.py
index <HASH>..<HASH> 100644
--- a/src/scs_core/data/linear_regression.py
+++ b/src/scs_core/data/linear_regression.py
@@ -46,13 +46,6 @@ class LinearRegression(object):
return len(self) > self.MIN_DATA_POINTS
- def has_tally(self):
- if self.__tally is None:
- raise ValueError("no tally set")
-
- return len(self) >= self.__tally
-
-
def append(self, rec: LocalizedDatetime, value):
count = len(self)
|
Fixed a bug in LinearRegression clients.
|
py
|
diff --git a/pyghmi/redfish/command.py b/pyghmi/redfish/command.py
index <HASH>..<HASH> 100644
--- a/pyghmi/redfish/command.py
+++ b/pyghmi/redfish/command.py
@@ -469,6 +469,17 @@ class Command(object):
currsettings[setting] = {'value': biosinfo['Attributes'][setting]}
return currsettings
+ def clear_system_configuration(self):
+ """Clear the BIOS/UEFI configuration
+
+ """
+ biosinfo = self._do_web_request(self._biosurl)
+ rb = biosinfo.get('Actions', {}).get('#Bios.ResetBios', {})
+ rb = rb.get('target', '')
+ if not rb:
+ raise Exception('BIOS reset not detected on this system')
+ self._do_web_request(rb, {'Action': 'Bios.ResetBios'})
+
def set_system_configuration(self, changeset):
redfishsettings = {'Attributes': changeset}
self._do_web_request(self._setbiosurl, redfishsettings, 'PATCH')
|
Add clear system configuration to redfish. Implement clear UEFI configuration for redfish targets. Change-Id: Id<I>c<I>b<I>ae<I>ae<I>ad3d8bcc0b<I>b
|
py
|
diff --git a/tests/test_serializer.py b/tests/test_serializer.py
index <HASH>..<HASH> 100644
--- a/tests/test_serializer.py
+++ b/tests/test_serializer.py
@@ -45,15 +45,14 @@ class TestCase(unittest.TestCase, treewalkers._base.TreeWalker):
exception = None
result = self.serialize_html(input, options)
for alternative in expected:
- try:
- self.assertEquals(alternative, result)
- except AssertionError:
- pass
- else:
+ if alternative == result:
break
else:
- options["omit_optional_tags"] = False
- self.assertEquals(self.serialize_html(input, options), result)
+ if options.get("omit_optional_tags", True):
+ options["omit_optional_tags"] = False
+ self.assertEquals(self.serialize_html(input, options), result)
+ else:
+ self.fail("Expected: %s, Received: %s" % (expected, result))
def serialize_html(self, input, options):
return u''.join(serializer.HTMLSerializer( \
|
Small (very small) improvements when tested with multiple alternate expected output (no longer relies on assertEquals and an AssertionError being raised; now makes comparison with == and eventually call fail() with a sensible error message) --HG-- extra : convert_revision : svn%3Aacbfec<I>-<I>-<I>-a<I>-<I>a<I>e<I>e0/trunk%<I>
|
py
|
diff --git a/spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py b/spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py
index <HASH>..<HASH> 100644
--- a/spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py
+++ b/spyder/plugins/ipythonconsole/tests/test_ipythonconsole.py
@@ -1304,6 +1304,8 @@ def test_console_working_directory(ipyconsole, qtbot):
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_startup_wdir
[email protected](not sys.platform.startswith('linux') or PY2,
+ reason="It only works on Linux with python 3.")
def test_console_complete(ipyconsole, qtbot):
"""Test for checking the working directory."""
shell = ipyconsole.get_current_shellwidget()
|
only run test complete on travis
|
py
|
diff --git a/pagoda/parser.py b/pagoda/parser.py
index <HASH>..<HASH> 100644
--- a/pagoda/parser.py
+++ b/pagoda/parser.py
@@ -381,13 +381,13 @@ class BodyParser(Parser):
joint.axes = axes[:max(joint.ADOF, joint.LDOF)]
if joint.ADOF and lo_stops is not None:
- joint.amotor.lo_stops = lo_stops
+ joint.lo_stops = lo_stops
if joint.ADOF and hi_stops is not None:
- joint.amotor.hi_stops = hi_stops
+ joint.hi_stops = hi_stops
if joint.ADOF and stop_cfm is not None:
- joint.amotor.stop_cfms = stop_cfm
+ joint.stop_cfms = stop_cfm
if joint.ADOF and stop_erp is not None:
- joint.amotor.stop_erps = stop_erp
+ joint.stop_erps = stop_erp
self.joints.append(joint)
|
Apply joint stops to the joint, not the motor.
|
py
|
diff --git a/indra/db/query_db_stmts.py b/indra/db/query_db_stmts.py
index <HASH>..<HASH> 100644
--- a/indra/db/query_db_stmts.py
+++ b/indra/db/query_db_stmts.py
@@ -63,7 +63,8 @@ def by_gene_role_type(agent_id=None, agent_ns='HGNC', role=None,
clauses.append(db.Agents.stmt_id == db.Statements.id)
if stmt_type:
clauses.append(db.Statements.type == stmt_type)
- stmts = get_statements(clauses, count=count, do_stmt_count=do_stmt_count)
+ stmts = get_statements(clauses, count=count, do_stmt_count=do_stmt_count,
+ db=db)
return stmts
|
Fix by_gene_role_type function. The database instance used by the function is now passed down to the get_statements function when it is called inside by_gene_role_type.
|
py
|
diff --git a/client.py b/client.py
index <HASH>..<HASH> 100644
--- a/client.py
+++ b/client.py
@@ -175,7 +175,6 @@ class Client(object):
old_nick = self.player.nick
self.player = Player()
self.player.nick = old_nick
- self.subscriber.on_ingame()
return True
def connect_retry(self, url=None, token=None, tries=-1):
@@ -340,6 +339,7 @@ class Client(object):
self.player.world.top_left = Vec(top, left)
self.player.world.bottom_right = Vec(bottom, right)
self.player.center = self.world.center
+ self.subscriber.on_ingame()
def parse_spectate_update(self, buf):
# only in spectate mode
|
Emit on_ingame() only when receiving world_rect
|
py
|
diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index <HASH>..<HASH> 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -92,6 +92,8 @@ class SosSpecs(Specs):
'sos_commands/general/subscription-manager_list_--installed']
)
sysctl = simple_file("sos_commands/kernel/sysctl_-a")
+ systemctl_list_unit_files = simple_file("sos_commands/systemd/systemctl_list-unit-files")
+ systemctl_list_units = simple_file("sos_commands/systemd/systemctl_list-units")
uname = simple_file("sos_commands/kernel/uname_-a")
uptime = simple_file("sos_commands/general/uptime")
vgdisplay = simple_file("vgdisplay")
|
Copied simple file specs changes from insights_archive to sos_archive (#<I>)
|
py
|
diff --git a/pymongo/collection.py b/pymongo/collection.py
index <HASH>..<HASH> 100644
--- a/pymongo/collection.py
+++ b/pymongo/collection.py
@@ -952,6 +952,19 @@ class Collection(common.BaseObject, Generic[_DocumentType]):
{'x': 1, '_id': 1}
{'x': 1, '_id': 2}
+ If ``upsert=True`` and no documents match the filter, create a
+ new document based on the filter criteria and update modifications.
+
+ >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True)
+ >>> result.matched_count
+ 0
+ >>> result.modified_count
+ 0
+ >>> result.upserted_id
+ ObjectId('626a678eeaa80587d4bb3fb7')
+ >>> db.test.find_one(result.upserted_id)
+ {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7}
+
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
|
PYTHON-<I> The doc should clarify that the resulting documents that are produced with upserts are constructed from both the filter and the update params (#<I>)
|
py
|
diff --git a/searx/search.py b/searx/search.py
index <HASH>..<HASH> 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -69,6 +69,14 @@ def make_callback(engine_name, results_queue, callback, params):
def process_callback(response, **kwargs):
response.search_params = params
+ timeout_overhead = 0.2 # seconds
+ search_duration = time() - params['started']
+ timeout_limit = engines[engine_name].timeout + timeout_overhead
+ if search_duration > timeout_limit:
+ engines[engine_name].stats['page_load_time'] += timeout_limit
+ engines[engine_name].stats['errors'] += 1
+ return
+
# callback
try:
search_results = callback(response)
@@ -81,14 +89,6 @@ def make_callback(engine_name, results_queue, callback, params):
engine_name, str(e))
return
- timeout_overhead = 0.2 # seconds
- search_duration = time() - params['started']
- timeout_limit = engines[engine_name].timeout + timeout_overhead
- if search_duration > timeout_limit:
- engines[engine_name].stats['page_load_time'] += timeout_limit
- engines[engine_name].stats['errors'] += 1
- return
-
# add results
for result in search_results:
result['engine'] = engine_name
|
[fix] timeout and response parsing order
|
py
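A short sketch of why the reordering matters, with assumed names: the elapsed-time guard now runs before any parsing, so a response that arrives past the engine's timeout budget is recorded as an error and discarded instead of being parsed and merged into the results.

from time import time

TIMEOUT_OVERHEAD = 0.2  # seconds, the same slack used in the patch

def handle_response(response, started, engine_timeout, stats, parse):
    search_duration = time() - started
    timeout_limit = engine_timeout + TIMEOUT_OVERHEAD
    if search_duration > timeout_limit:
        # Too late: count the failure and skip parsing entirely.
        stats["page_load_time"] += timeout_limit
        stats["errors"] += 1
        return []
    # Only a response that arrived in time is worth the cost of parsing.
    return parse(response)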
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
index <HASH>..<HASH> 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -189,7 +189,7 @@ for name in "example_movies example_start example_volume_rendering example_virtu
if on_rtd:
# nb convert on rtd puts the output in the same directory..
import shutil
- shutil.move("../../" + dest, dest)
+ shutil.move("../../examples/" + dest, dest)
else:
print("%s is already up to date" % name)
html_extra_path.append(source)
|
move the html file by hand (fix) [skip ci]
|
py
|
diff --git a/lib/imageframe.py b/lib/imageframe.py
index <HASH>..<HASH> 100644
--- a/lib/imageframe.py
+++ b/lib/imageframe.py
@@ -793,7 +793,8 @@ Keyboard Shortcuts: (For Mac OSX, replace 'Ctrl' with 'Apple')
self.cmap_panels[col].imax_val.SetValue('%.4g' % imax)
if enhance:
jmin, jmax = np.percentile(img, [clevel, 100.0-clevel])
-
+ if imax == imin:
+ imax = imin + 0.5
conf.cmap_lo[col] = xlo = (jmin-imin)*conf.cmap_range/(imax-imin)
conf.cmap_hi[col] = xhi = (jmax-imin)*conf.cmap_range/(imax-imin)
@@ -812,7 +813,8 @@ Keyboard Shortcuts: (For Mac OSX, replace 'Ctrl' with 'Apple')
self.cmap_panels[ix].imax_val.SetValue('%.4g' % imax)
if enhance:
jmin, jmax = np.percentile(img[:,:,ix], [1, 99])
-
+ if imax == imin:
+ imax = imin + 0.5
conf.cmap_lo[ix] = xlo = (jmin-imin)*conf.cmap_range/(imax-imin)
conf.cmap_hi[ix] = xhi = (jmax-imin)*conf.cmap_range/(imax-imin)
self.cmap_panels[ix].cmap_hi.SetValue(xhi)
|
avoid divide by zeros on blank images
|
py
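A minimal sketch of the guard this patch adds, with assumed variable names: on a blank image every pixel is identical, so imin equals imax and the percentile-based contrast scaling would otherwise divide by zero.

import numpy as np

def contrast_limits(img, clevel=1.0, cmap_range=1000):
    imin, imax = float(img.min()), float(img.max())
    jmin, jmax = np.percentile(img, [clevel, 100.0 - clevel])
    if imax == imin:
        imax = imin + 0.5   # avoid a zero denominator on flat images
    lo = (jmin - imin) * cmap_range / (imax - imin)
    hi = (jmax - imin) * cmap_range / (imax - imin)
    return lo, hi

print(contrast_limits(np.zeros((10, 10))))   # finite limits even for an all-zero image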
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ from setuptools import setup, find_packages
from setup_helpers import get_version, readme
-INSTALL_REQUIRES = ['blessed>=1.16.0']
+INSTALL_REQUIRES = ['blessed>=1.16.1']
TESTS_REQUIRE = ['mock; python_version < "3.3"',
'unittest2; python_version < "2.7"']
|
Require blessed <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -47,6 +47,8 @@ setup(
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
@@ -60,7 +62,7 @@ setup(
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
- install_requires=['beets', 'lxml', 'requests'],
+ install_requires=['mediafile', 'lxml', 'requests'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
|
Update setup.py with Python 3 changes. Use mediafile, not beets, and support Python <I> and up.
|
py
|
diff --git a/quark/api/extensions/subnets_quark.py b/quark/api/extensions/subnets_quark.py
index <HASH>..<HASH> 100644
--- a/quark/api/extensions/subnets_quark.py
+++ b/quark/api/extensions/subnets_quark.py
@@ -20,7 +20,7 @@ from quantum.api.v2 import attributes
EXTENDED_ATTRIBUTES_2_0 = {
'subnets': {
- "allocation_pools": {'allow_post': False, 'allow_put': False,
+ "allocation_pools": {'allow_post': True, 'allow_put': True,
'default': attributes.ATTR_NOT_SPECIFIED,
'is_visible': False},
"enable_dhcp": {'allow_post': False, 'allow_put': False,
@@ -34,7 +34,6 @@ class Subnets_quark(extensions.ExtensionDescriptor):
"""Extends subnets for quark API purposes.
* Shunts enable_dhcp to false
- * Disables allocation_pools
"""
@classmethod
|
Allows allocation_pools in subnet create. Re-enables allocation pool POST and PUT on subnet REST verbs. Currently unimplemented, but at least we're honoring part of the spec.
|
py
|
diff --git a/vc_zoom/indico_vc_zoom/plugin.py b/vc_zoom/indico_vc_zoom/plugin.py
index <HASH>..<HASH> 100644
--- a/vc_zoom/indico_vc_zoom/plugin.py
+++ b/vc_zoom/indico_vc_zoom/plugin.py
@@ -410,7 +410,7 @@ class ZoomPlugin(VCPluginMixin, IndicoPlugin):
for room in VCRoom.query.filter(
VCRoom.type == self.service_name, VCRoom.data.contains({'host': source.identifier})
):
- room.data['host'] = target.id
+ room.data['host'] = target.identifier
flag_modified(room, 'data')
def get_notification_cc_list(self, action, vc_room, event):
|
VC/Zoom: fix bug with merging
|
py
|
diff --git a/wandb/cli.py b/wandb/cli.py
index <HASH>..<HASH> 100644
--- a/wandb/cli.py
+++ b/wandb/cli.py
@@ -440,11 +440,14 @@ def init(ctx):
click.echo(click.style("This directory is configured! Next, track a run:\n", fg="green") +
textwrap.dedent("""\
- * `{code}` in your training script
+ * In your training script:
+ {code1}
+ {code2}
* then `{run}`.
""").format(
- code=click.style("import wandb", bold=True),
- run=click.style("wandb run <training_command>", bold=True),
+ code1=click.style("import wandb", bold=True),
+ code2=click.style("wandb.init()", bold=True),
+ run=click.style("wandb run <train.py>", bold=True),
# saving this here so I can easily put it back when we re-enable
# push/pull
#"""
|
Update post "wandb init" instructions.
|
py
|
diff --git a/satpy/resample.py b/satpy/resample.py
index <HASH>..<HASH> 100644
--- a/satpy/resample.py
+++ b/satpy/resample.py
@@ -834,7 +834,8 @@ class BucketResamplerBase(BaseResampler):
def precompute(self, **kwargs):
"""Create X and Y indices and store them for later use."""
LOG.debug("Initializing bucket resampler.")
- source_lons, source_lats = self.source_geo_def.get_lonlats()
+ source_lons, source_lats = self.source_geo_def.get_lonlats(
+ chunks=CHUNK_SIZE)
self.resampler = bucket.BucketResampler(self.target_geo_def,
source_lons,
source_lats)
|
Ensure coordinates are as dask arrays
|
py
|
diff --git a/snakebite/client.py b/snakebite/client.py
index <HASH>..<HASH> 100644
--- a/snakebite/client.py
+++ b/snakebite/client.py
@@ -753,7 +753,7 @@ class Client(object):
# Source is a file
elif self._is_file(node):
temporary_target = "%s._COPYING_" % target
- f = open(temporary_target, 'w')
+ f = open(temporary_target, 'wb')
try:
for load in self._read_file(path, node, tail_only=False, check_crc=check_crc):
f.write(load)
|
Changing the 'open' command to open a temporary file as binary. The lack of this parameter causes a bug when using copyToLocal to copy a file from a Linux HDFS cluster to a Windows client box. The file that ends up on Windows isn't byte-identical to the starting file on Linux, due to some end-of-line conversion issue.
|
py
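A small illustration of the end-of-line issue the message describes (the file name is chosen for the example): on Windows, a text-mode handle translates '\n' into '\r\n' on write, so bytes streamed from HDFS would no longer match the source file, while 'wb' preserves them on every platform.

payload = b"line one\nline two\n"   # bytes as read from the remote file

with open("copy_binary.tmp", "wb") as f:
    f.write(payload)                 # binary mode writes the bytes unchanged

with open("copy_binary.tmp", "rb") as f:
    assert f.read() == payload       # byte-identical on Linux and Windows alike

# Writing the same payload through a text-mode handle would require decoding it
# first and, on Windows, would expand each '\n' into '\r\n', breaking CRC checks.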
|
diff --git a/tests/test_pages.py b/tests/test_pages.py
index <HASH>..<HASH> 100644
--- a/tests/test_pages.py
+++ b/tests/test_pages.py
@@ -329,6 +329,16 @@ def test_add_foreign_twice(graph, outpdf):
out.save(outpdf)
[email protected](reason="needs qpdf fix to issue 514")
+def test_add_twice_without_copy_foreign(graph, outpdf):
+ out = Pdf.new()
+ out.pages.append(graph.pages[0])
+ assert len(out.pages) == 1
+ out.pages.append(graph.pages[0])
+ assert len(out.pages) == 2
+ out.save(outpdf)
+
+
def test_repr_pagelist(fourpages):
assert '4' in repr(fourpages.pages)
|
tests: "document" via tests that copying duplicate foreign pages doesn't work
|
py
|
diff --git a/log_request_id/tests.py b/log_request_id/tests.py
index <HASH>..<HASH> 100644
--- a/log_request_id/tests.py
+++ b/log_request_id/tests.py
@@ -163,6 +163,7 @@ class RequestIDPassthroughTestCase(TestCase):
self.assertRaises(ImproperlyConfigured, inner)
+# asgiref is required from Django 3.0
if async_to_sync:
class AsyncRequestIDLoggingTestCase(RequestIDLoggingTestCase):
|
Add comment to explain Django version differences in asgiref
|
py
|