| diff (string, lengths 139–3.65k) | message (string, lengths 8–627) | diff_languages (string, 1 class) |
---|---|---|
diff --git a/mongonaut/forms.py b/mongonaut/forms.py
index <HASH>..<HASH> 100644
--- a/mongonaut/forms.py
+++ b/mongonaut/forms.py
@@ -3,33 +3,9 @@ from django import forms
from mongoengine.fields import EmbeddedDocumentField, ListField
from mongonaut.widgets import get_widget
-
-
class DocumentListForm(forms.Form):
""" The main document list form """
mongo_id = forms.MultipleChoiceField(required=True, widget=forms.CheckboxSelectMultiple)
-
-
-class DocumentDetailFormFactory(object):
- """ Used to generate DocumentDetailForms for the DocumentDetailView
- TODO: Prolly nix
- """
-
- def __init__(self, document, document_type):
-
- self.document_type
- self.document = document
- self.form = DocumentDetailForm()
-
- for key in sorted([x for x in self.document_type._fields.keys() if x != 'id']):
- # TODO - skip EmbeddedDocumentField and ListField for now
- if isinstance(self.document._fields[key], EmbeddedDocumentField):
- continue
- if isinstance(self.document._fields[key], ListField):
- continue
- self.form.fields[key].append(
- get_widget('TODO - assign value')
- )
def document_detail_form_initial(form, document_type, document):
""" Adds document field to a form. Not sure what to call this but Factory is not it."""
|
Nixing icky FormFactory that wouldn't work anyway. So there
|
py
|
diff --git a/test/test_chapeldomain.py b/test/test_chapeldomain.py
index <HASH>..<HASH> 100644
--- a/test/test_chapeldomain.py
+++ b/test/test_chapeldomain.py
@@ -309,6 +309,23 @@ class ChapelClassMemberTests(ChapelObjectTestCase):
mod = self.new_obj(objtype)
self.assertEqual(expected, mod.needs_arglist())
+ def test_chpl_type_name(self):
+ """Verify chpl_type_name property for different objtypes."""
+ test_cases = [
+ ('function', ''),
+ ('iterfunction', ''),
+ ('type', ''),
+ ('data', ''),
+ ('attribute', ''),
+ ('method', 'method'),
+ ('itermethod', 'iterator'),
+ ('opfunction', ''),
+ ('opmethod', 'operator'),
+ ]
+ for objtype, expected_type in test_cases:
+ mod = self.new_obj(objtype)
+ self.assertEqual(expected_type, mod.chpl_type_name)
+
class ChapelObjectTests(ChapelObjectTestCase):
"""ChapelObject tests."""
|
test: add ClassMember type name tests
|
py
|
diff --git a/autoslugfield/__init__.py b/autoslugfield/__init__.py
index <HASH>..<HASH> 100644
--- a/autoslugfield/__init__.py
+++ b/autoslugfield/__init__.py
@@ -1 +1,6 @@
-
+# -*- coding: utf-8 -*-
+
+__author__ = 'Andy Mikhailenko'
+__license__ = 'GNU Lesser General Public License (GPL), Version 3'
+__url__ = 'http://bitbucket.org/neithere/django-autoslugfield/'
+__version__ = '0.9'
|
Add setuptools support (part 2 of 2)
|
py
|
diff --git a/src/urh/dev/native/Device.py b/src/urh/dev/native/Device.py
index <HASH>..<HASH> 100644
--- a/src/urh/dev/native/Device.py
+++ b/src/urh/dev/native/Device.py
@@ -69,7 +69,11 @@ class Device(QObject):
def init_recv_buffer(self):
if self.receive_buffer is None:
- nsamples = int(0.6*(psutil.virtual_memory().available / 8))
+ if self.is_ringbuffer:
+ nsamples = 2 ** 16
+ else:
+ # Take 60% of avail memory
+ nsamples = int(0.6*(psutil.virtual_memory().available / 8))
self.receive_buffer = np.zeros(nsamples, dtype=np.complex64, order='C')
logger.info("Initialized receiving buffer with size {0:.2f}MB".format(self.receive_buffer.nbytes / (1024 * 1024)))
|
fix buffer size for native spectrum analyzer
|
py
|
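A minimal, standalone sketch of the sizing rule in the fix above (hypothetical helper name; assumes numpy and psutil are installed, and uses complex64's 8 bytes per sample as the diff does):

```python
import numpy as np
import psutil

def recv_buffer_samples(is_ringbuffer: bool) -> int:
    """Mirror the diff's rule: a small fixed ring buffer, otherwise
    ~60% of available memory measured in complex64 samples (8 bytes each)."""
    if is_ringbuffer:
        return 2 ** 16
    return int(0.6 * (psutil.virtual_memory().available / 8))

buf = np.zeros(recv_buffer_samples(True), dtype=np.complex64, order='C')
print("Buffer size: {0:.2f}MB".format(buf.nbytes / (1024 * 1024)))  # 0.50MB
```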
diff --git a/cluster_vcf_records/variant_tracking.py b/cluster_vcf_records/variant_tracking.py
index <HASH>..<HASH> 100644
--- a/cluster_vcf_records/variant_tracking.py
+++ b/cluster_vcf_records/variant_tracking.py
@@ -513,7 +513,7 @@ class VariantTracker:
best_merged_index = None
merged_record, merged_vars, merged_var_ids = self.clusters[-1]
for i in reversed(range(len(self.clusters) - 1)):
- if merged_record.CHROM != self.clusters[i][0].CHROM or merged_record.ref_end_pos() + 10 < self.clusters[i][0].POS:
+ if merged_record.CHROM != self.clusters[i][0].CHROM or self.clusters[i][0].ref_end_pos() + 10 < merged_record.POS:
break
have_same_combos = vcf_records_make_same_allele_combination(self.clusters[i][0], merged_record, self.ref_seqs)
|
Fix check between record distances before merging
|
py
|
diff --git a/mbed_cloud_sdk/devices/mds/rest.py b/mbed_cloud_sdk/devices/mds/rest.py
index <HASH>..<HASH> 100644
--- a/mbed_cloud_sdk/devices/mds/rest.py
+++ b/mbed_cloud_sdk/devices/mds/rest.py
@@ -104,6 +104,7 @@ class RESTClientObject(object):
# https pool manager
self.pool_manager = urllib3.PoolManager(
+ maxsize=4,
num_pools=pools_size,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
|
Add maxsize arg, which is now in swagger-codegen master
|
py
|
diff --git a/libargos/config/basecti.py b/libargos/config/basecti.py
index <HASH>..<HASH> 100644
--- a/libargos/config/basecti.py
+++ b/libargos/config/basecti.py
@@ -47,13 +47,12 @@ class BaseCti(BaseTreeItem):
"""
super(BaseCti, self).__init__(nodeName=nodeName)
- self._value = None # keep pylint happy
self._defaultValue = self._convertValueType(defaultValue)
-
- if value is NOT_SPECIFIED:
- value = self.defaultValue
- self.value = value
+ if value is NOT_SPECIFIED:
+ self._value = self.defaultValue
+ else:
+ self._value = self._convertValueType(value)
def __eq__(self, other):
|
Consistent initialization.
|
py
|
diff --git a/kubespawner/reflector.py b/kubespawner/reflector.py
index <HASH>..<HASH> 100644
--- a/kubespawner/reflector.py
+++ b/kubespawner/reflector.py
@@ -158,12 +158,7 @@ class ResourceReflector(LoggingConfigurable):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- # Load kubernetes config here, since this is a Singleton and
- # so this __init__ will be run way before anything else gets run.
- try:
- config.load_incluster_config()
- except config.ConfigException:
- config.load_kube_config()
+ # client configuration for kubernetes has already taken place
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
|
Move global kubernetes client config logic to spawner.py. Previously, reflector.py was the first to call self.api = shared_client; that is no longer the case, as spawner.py calls it just before kicking off the reflector.py code. In my opinion, putting the global config logic in spawner.py is more intuitive and clearer than in reflector.py, especially with the new traitlet config options set on the spawner.KubeSpawner object.
|
py
|
diff --git a/metaknowledge/graphHelpers.py b/metaknowledge/graphHelpers.py
index <HASH>..<HASH> 100644
--- a/metaknowledge/graphHelpers.py
+++ b/metaknowledge/graphHelpers.py
@@ -803,11 +803,11 @@ def graphStats(G, stats = ('nodes', 'edges', 'isolates', 'loops', 'density', 'tr
if 'loops' in stats:
if makeString:
if sentenceString:
- stsData.append("{:G} self loops".format(len(list(G.selfloop_edges()))))
+ stsData.append("{:G} self loops".format(len(list(nx.selfloop_edges(G)))))
else:
- stsData.append("Self loops: {:G}".format(len(list(G.selfloop_edges()))))
+ stsData.append("Self loops: {:G}".format(len(list(nx.selfloop_edges(G)))))
else:
- stsData['loops'] = len(list(G.selfloop_edges()))
+ stsData['loops'] = len(list(nx.selfloop_edges(G)))
if 'density' in stats:
if makeString:
if sentenceString:
|
Changed .selfloop_edges to align with networkx <I>
|
py
|
diff --git a/src/cone/ugm/browser/settings.py b/src/cone/ugm/browser/settings.py
index <HASH>..<HASH> 100644
--- a/src/cone/ugm/browser/settings.py
+++ b/src/cone/ugm/browser/settings.py
@@ -5,7 +5,6 @@ from node.ext.ldap import (
BASE,
ONELEVEL,
SUBTREE,
- queryNode,
LDAPNode,
)
from yafowil.base import (
@@ -110,7 +109,7 @@ class CreateContainerAction(Tile):
except Exception:
raise Exception(u"Invalid DN.")
rdn = explode_dn(dn)[0]
- node = queryNode(props, parent_dn)
+ node = LDAPNode(parent_dn, props)
if node is None:
raise Exception(u"Parent not found. Can't continue.")
node[rdn] = LDAPNode()
|
use LDAPNode directly since queryNode seems a bit strange and superfluous here
|
py
|
diff --git a/src/gnupg.py b/src/gnupg.py
index <HASH>..<HASH> 100644
--- a/src/gnupg.py
+++ b/src/gnupg.py
@@ -1065,19 +1065,6 @@ class GPGWrapper(GPG):
raise LookupError(
"GnuPG public key for subkey %s not found!" % subkey)
- def encrypt(self, data, recipient, default_key=None, always_trust=True,
- passphrase=None, symmetric=False):
- """
- Encrypt data using GPG.
- """
- # TODO: devise a way so we don't need to "always trust".
- return super(GPGWrapper, self).encrypt(data, recipient,
- default_key=default_key,
- always_trust=always_trust,
- passphrase=passphrase,
- symmetric=symmetric,
- cipher_algo='AES256')
-
def send_keys(self, keyserver, *keyids):
"""Send keys to a keyserver."""
result = self._result_map['list'](self)
|
Deduplicate code in GPGWrapper.
|
py
|
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index <HASH>..<HASH> 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -803,12 +803,12 @@ class TrainingArguments:
)
},
)
- fsdp_transformer_layer_cls_to_wrap: str = field(
+ fsdp_transformer_layer_cls_to_wrap: Optional[str] = field(
default=None,
metadata={
"help": (
"Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... "
- "(useful only when `fsdp` flag is passed).",
+ "(useful only when `fsdp` flag is passed)."
)
},
)
|
Fix TrainingArguments help section (#<I>)
|
py
|
diff --git a/spyder/preferences/runconfig.py b/spyder/preferences/runconfig.py
index <HASH>..<HASH> 100644
--- a/spyder/preferences/runconfig.py
+++ b/spyder/preferences/runconfig.py
@@ -336,6 +336,8 @@ class BaseRunConfigDialog(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
+ self.setWindowFlags(
+ self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
# Destroying the C++ object right after closing the dialog box,
# otherwise it may be garbage-collected in another QThread
|
BaseRunConfigDialog: Remove the ? button in the window bar. This is useless because we don't use that feature.
|
py
|
diff --git a/python/test/test_async.py b/python/test/test_async.py
index <HASH>..<HASH> 100644
--- a/python/test/test_async.py
+++ b/python/test/test_async.py
@@ -373,8 +373,8 @@ proxies = [
# prefer local testing keys to global keys
keys_folder = os.path.dirname(root)
-keys_global = os.path.join (keys_folder, 'keys.json')
-keys_local = os.path.join (keys_folder, 'keys.local.json')
+keys_global = os.path.join(keys_folder, 'keys.json')
+keys_local = os.path.join(keys_folder, 'keys.local.json')
keys_file = keys_local if os.path.exists(keys_local) else keys_global
# load the api keys from config
|
PEP8 edits & cleanup
|
py
|
diff --git a/pipenv/utils.py b/pipenv/utils.py
index <HASH>..<HASH> 100644
--- a/pipenv/utils.py
+++ b/pipenv/utils.py
@@ -1350,8 +1350,12 @@ def is_virtual_environment(path):
if not path.is_dir():
return False
for bindir_name in ('bin', 'Scripts'):
- for python_like in path.joinpath(bindir_name).glob('python*'):
- if python_like.is_file() and os.access(str(python_like), os.X_OK):
+ for python in path.joinpath(bindir_name).glob('python*'):
+ try:
+ exeness = python.is_file() and os.access(str(python), os.X_OK)
+ except OSError:
+ exeness = False
+ if exeness:
return True
return False
|
Ignore OSError in is_virtual_environment check. This works around a faulty virtual environment on my machine that makes os.access throw "OSError: too many levels of symlinks". If a virtual environment is faulty, we can just ignore it.
|
py
|
diff --git a/cherrypy/lib/profiler.py b/cherrypy/lib/profiler.py
index <HASH>..<HASH> 100644
--- a/cherrypy/lib/profiler.py
+++ b/cherrypy/lib/profiler.py
@@ -32,6 +32,15 @@ module from the command line, it will call serve() for you.
"""
+# Make profiler output more readable by adding __init__ modules' parents.
+def new_func_strip_path(func_name):
+ filename, line, name = func_name
+ if filename.endswith("__init__.py"):
+ return os.path.basename(filename[:-12]) + filename[-12:], line, name
+ return os.path.basename(filename), line, name
+import pstats
+pstats.func_strip_path = new_func_strip_path
+
import hotshot
import os, os.path
import sys
|
Made profiler output more readable by prepending __init__.py modules' parent folders.
|
py
|
diff --git a/mockupdb/__init__.py b/mockupdb/__init__.py
index <HASH>..<HASH> 100755
--- a/mockupdb/__init__.py
+++ b/mockupdb/__init__.py
@@ -1046,7 +1046,7 @@ class MockupDB(object):
>>> future = go(client.db.command, 'foo')
>>> s.got('foo')
True
- >>> s.got(Command('foo'))
+ >>> s.got(Command('foo', namespace='db'))
True
>>> s.got(Command('foo', key='value'))
False
|
Test that ".$cmd" is trimmed from command namespace.
|
py
|
diff --git a/src/kba/pipeline/run.py b/src/kba/pipeline/run.py
index <HASH>..<HASH> 100644
--- a/src/kba/pipeline/run.py
+++ b/src/kba/pipeline/run.py
@@ -8,7 +8,7 @@ Copyright 2012 Diffeo, Inc.
'''
import os
import sys
-from . import Pipeline
+from _pipeline import Pipeline
def make_absolute_paths( config ):
## remove the root_path, so it does not get extended itself
|
converting "from . import" to "from _pipeline import"
|
py
|
diff --git a/nrrd.py b/nrrd.py
index <HASH>..<HASH> 100644
--- a/nrrd.py
+++ b/nrrd.py
@@ -28,20 +28,6 @@ def _convert_to_reproducible_floatingpoint( x ):
value = str(x)
return value
-def _nrrd_read_header_lines(nrrdfile):
- """Read header lines from a .nrrd/.nhdr file."""
- line = nrrdfile.readline().decode('ascii')
- if line[:-2] != 'NRRD000':
- raise NrrdError('Missing magic "NRRD" word, is this an NRRD file?')
- if line[-2] > '5':
- raise NrrdError('NRRD file version too new for this library.')
- headerlines = []
- while line != '\n' and line != '':
- headerlines.append(line)
- line = nrrdfile.readline().decode('ascii')
-
- return headerlines
-
_TYPEMAP_NRRD2NUMPY = {
'signed char': 'i1',
'int8': 'i1',
|
ENH: Removed unused function from file
|
py
|
diff --git a/crosspm/adapters/artifactory.py b/crosspm/adapters/artifactory.py
index <HASH>..<HASH> 100644
--- a/crosspm/adapters/artifactory.py
+++ b/crosspm/adapters/artifactory.py
@@ -144,7 +144,7 @@ class Adapter(BaseAdapter):
_package.find_dependencies(_deps_file)
elif self._config.deps_file_name:
_deps_file = _package.get_file(self._config.deps_file_name, downloader.temp_path)
- if os.path.isfile(_deps_file):
+ if _deps_file and os.path.isfile(_deps_file):
_package.find_dependencies(_deps_file)
return _packages_found
|
bugfix2: a package without a dependencies file, combined with an unset dependencies file name config, caused an error.
|
py
|
diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py
index <HASH>..<HASH> 100644
--- a/parsl/dataflow/usage_tracking/usage.py
+++ b/parsl/dataflow/usage_tracking/usage.py
@@ -6,8 +6,10 @@ import getpass
import json
import logging
import socket
+import sys
from parsl.dataflow.states import States
+from parsl.version import VERSION as PARSL_VERSION
logger = logging.getLogger(__name__)
@@ -48,6 +50,10 @@ class UsageTracker (object):
self.dfk = dfk
self.config = self.dfk.config
self.uuid = str(uuid.uuid4())
+ self.parsl_version = PARSL_VERSION
+ self.python_version = "{}.{}.{}".format(sys.version_info.major,
+ sys.version_info.minor,
+ sys.version_info.micro)
self.test_mode, self.tracking_enabled = self.check_tracking_enabled()
logger.debug("Tracking status: {}".format(self.tracking_enabled))
logger.debug("Testing mode : {}".format(self.test_mode))
@@ -93,6 +99,8 @@ class UsageTracker (object):
'uname': hashed_username,
'hname': hashed_hostname,
'test': self.test_mode,
+ 'parsl_v': self.parsl_version,
+ 'python_v': self.python_version,
'start': time.time()}
return json.dumps(message)
|
Adding version info from parsl and python to usage tracking. Fixes issue #<I>
|
py
|
diff --git a/plenum/test/zstack_tests/test_zstack_reconnection.py b/plenum/test/zstack_tests/test_zstack_reconnection.py
index <HASH>..<HASH> 100644
--- a/plenum/test/zstack_tests/test_zstack_reconnection.py
+++ b/plenum/test/zstack_tests/test_zstack_reconnection.py
@@ -45,9 +45,9 @@ def testZStackNodeReconnection(tconf, looper, txnPoolNodeSet, client1, wallet1,
checkFlakyConnected(True)
nodeToCrash.stop()
looper.removeProdable(nodeToCrash)
- looper.runFor(2)
- looper.run(eventually(checkFlakyConnected, False, retryWait=1, timeout=20))
- looper.runFor(3)
+ looper.runFor(1)
+ looper.run(eventually(checkFlakyConnected, False, retryWait=1, timeout=35))
+ looper.runFor(1)
node = TestNode(nodeToCrash.name, basedirpath=tdirWithPoolTxns, config=tconf,
ha=nodeToCrash.nodestack.ha, cliha=nodeToCrash.clientstack.ha)
looper.add(node)
|
changing timeouts so the disconnect check happens twice
|
py
|
diff --git a/awacs/helpers/trust.py b/awacs/helpers/trust.py
index <HASH>..<HASH> 100644
--- a/awacs/helpers/trust.py
+++ b/awacs/helpers/trust.py
@@ -23,3 +23,20 @@ def get_default_assumerole_policy(region=''):
]
)
return policy
+
+
+def get_ecs_assumerole_policy(region=''):
+ """ Helper function for building the ECS AssumeRole Policy
+ """
+
+ service = 'ecs.amazonaws.com'
+ policy = Policy(
+ Statement=[
+ Statement(
+ Principal=Principal('Service', [service]),
+ Effect=Allow,
+ Action=[sts.AssumeRole]
+ )
+ ]
+ )
+ return policy
|
Add new ECS assume role trust policy. This is used by the ecsServiceRole to allow ECS to assume role to set up things like ELBs for services.
|
py
|
diff --git a/python_modules/dagit/dagit/webserver.py b/python_modules/dagit/dagit/webserver.py
index <HASH>..<HASH> 100644
--- a/python_modules/dagit/dagit/webserver.py
+++ b/python_modules/dagit/dagit/webserver.py
@@ -77,7 +77,6 @@ class DagitWebserver(GraphQLServer, Generic[T_IWorkspaceProcessContext]):
def make_security_headers(self) -> dict:
return {
"Cache-Control": "no-store",
- "Clear-Site-Data": "*",
"Feature-Policy": "microphone 'none'; camera 'none'",
"Referrer-Policy": "strict-origin-when-cross-origin",
"X-Content-Type-Options": "nosniff",
|
[dagit] Remove clear-site-data header (#<I>) ### Summary & Motivation We don't really need this, there's useful stuff in our site data. ### How I Tested These Changes Run dagit, verify that the header is not included in the response.
|
py
|
diff --git a/picoweb/__init__.py b/picoweb/__init__.py
index <HASH>..<HASH> 100644
--- a/picoweb/__init__.py
+++ b/picoweb/__init__.py
@@ -57,7 +57,7 @@ class HTTPRequest:
class WebApp:
def __init__(self, routes):
- self.routes = routes
+ self.url_map = routes
def _handle(self, reader, writer):
print(reader, writer)
@@ -74,7 +74,7 @@ class WebApp:
print((method, path, proto), headers)
req = HTTPRequest(method, path, headers)
found = False
- for pattern, handler, *extra in self.routes:
+ for pattern, handler, *extra in self.url_map:
if path == pattern:
found = True
break
@@ -93,7 +93,7 @@ class WebApp:
def route(self, url, **kwargs):
def _route(f):
- self.routes.append((url, f, kwargs))
+ self.url_map.append((url, f, kwargs))
return f
return _route
|
WebApp: Renaming app's attribute to .url_map, for compat with Flask. It's unclear how much real compatibility this will give, but at least naming will be consistent.
|
py
|
diff --git a/pages/models.py b/pages/models.py
index <HASH>..<HASH> 100644
--- a/pages/models.py
+++ b/pages/models.py
@@ -456,6 +456,8 @@ class Page(MPTTModel):
"""Return content of each placeholder in each language."""
out = []
for p in get_placeholders(self.get_template()):
+ if p.name in ('title', 'slug'):
+ continue # these were already included
out.append((p.name, langs(
lambda lang: self.get_content(lang, p.name,
language_fallback=False))))
|
fix: don't doubly-export title or slug if they appear as placeholders
|
py
|
diff --git a/certsuite/harness.py b/certsuite/harness.py
index <HASH>..<HASH> 100644
--- a/certsuite/harness.py
+++ b/certsuite/harness.py
@@ -172,7 +172,7 @@ class Device(object):
return self
def __exit__(self, *args, **kwargs):
- # Original settings are restarted by Device.restore
+ # Original settings are reinstated by Device.restore
logger.info("Tearing down device after testing")
with MarionetteSession(self.adb) as marionette:
lock_screen = gaiautils.LockScreen(marionette)
|
fixup! Bug <I>: Disable screen lock, display sleep, and switch on screen in super harness
|
py
|
diff --git a/python/buildgeocodingdata.py b/python/buildgeocodingdata.py
index <HASH>..<HASH> 100755
--- a/python/buildgeocodingdata.py
+++ b/python/buildgeocodingdata.py
@@ -119,7 +119,7 @@ def output_geodata_code(geodata, outfilename):
for prefix in sorted(geodata.keys()):
if len(prefix) > longest_prefix:
longest_prefix = len(prefix)
- print >> outfile, " '%s': %r," % (prefix, geodata[prefix])
+ print >> outfile, " '%s':%r," % (prefix, geodata[prefix])
print >> outfile, "}"
print >> outfile, "GEOCODE_LONGEST_PREFIX = %d" % longest_prefix
|
Remove unnecessary spaces (reduces generated file size by 5%)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@ setup(
python_requires='>=3.5, <4',
extras_require={
'tester': [
- "eth-tester[py-evm]==0.1.0-beta.26",
+ "eth-tester[py-evm]==0.1.0-beta.29",
"py-geth>=2.0.1,<3.0.0",
],
'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
|
upgrade eth-tester to beta <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ setup(
url='https://github.com/amueller/word_cloud',
description='A little word cloud generator',
license='MIT',
- ext_modules=[Extension("wordcloud/query_integral_image",
+ ext_modules=[Extension("wordcloud.query_integral_image",
["wordcloud/query_integral_image.c"])],
packages=['wordcloud'],
package_data={'wordcloud': ['stopwords', 'DroidSansMono.ttf']}
|
hopefully fixing the cython build
|
py
|
diff --git a/Lib/fontbakery/configuration.py b/Lib/fontbakery/configuration.py
index <HASH>..<HASH> 100644
--- a/Lib/fontbakery/configuration.py
+++ b/Lib/fontbakery/configuration.py
@@ -5,7 +5,10 @@ import yaml
class Configuration(dict):
def __init__(self, **kwargs):
super().__init__(kwargs)
- for required_arg in ["custom_order", "explicit_checks", "exclude_checks"]:
+ for required_arg in ["custom_order",
+ "explicit_checks",
+ "exclude_checks",
+ "full_lists"]:
if required_arg not in self:
self[required_arg] = None
|
Default "full_lists" to None on the Configuration class (fixes #<I>)
|
py
|
diff --git a/mailer/models.py b/mailer/models.py
index <HASH>..<HASH> 100644
--- a/mailer/models.py
+++ b/mailer/models.py
@@ -1,3 +1,4 @@
+import base64
import logging
import pickle
@@ -84,13 +85,22 @@ class Message(models.Model):
return False
def _get_email(self):
- if self.message_data == "":
+ if self.message_data == u"":
return None
else:
- return pickle.loads(self.message_data.encode("ascii"))
+ try:
+ return pickle.loads(base64.decodestring(self.message_data))
+ except Exception:
+ try:
+ # previous method was to just do pickle.dumps(val)
+ return pickle.loads(self.message_data.encode("ascii"))
+ except Exception:
+ return None
def _set_email(self, val):
- self.message_data = pickle.dumps(val)
+ # pickle.dumps returns essentially binary data which we need to encode
+ # to store in a unicode field.
+ self.message_data = base64.encodestring(pickle.dumps(val))
email = property(_get_email, _set_email, doc=
"""EmailMessage object. If this is mutated, you will need to
|
Fixed bug with unicode data in email causing SQL failures
|
py
|
diff --git a/faker/providers/person/uk_UA/__init__.py b/faker/providers/person/uk_UA/__init__.py
index <HASH>..<HASH> 100644
--- a/faker/providers/person/uk_UA/__init__.py
+++ b/faker/providers/person/uk_UA/__init__.py
@@ -1,15 +1,24 @@
# coding=utf-8
from __future__ import unicode_literals
+
+from collections import OrderedDict
+
from .. import Provider as PersonProvider
class Provider(PersonProvider):
- formats = (
- '{{first_name_male}} {{last_name}}',
- '{{first_name_female}} {{last_name}}',
- '{{prefix_male}} {{first_name_male}} {{last_name}}',
- '{{prefix_female}} {{first_name_female}} {{last_name}}',
- )
+ formats_female = OrderedDict((
+ ('{{first_name_female}} {{last_name}}', 0.9),
+ ('{{prefix_female}} {{first_name_female}} {{last_name}}', 0.1),
+ ))
+
+ formats_male = OrderedDict((
+ ('{{first_name_male}} {{last_name}}', 0.9),
+ ('{{prefix_male}} {{first_name_male}} {{last_name}}', 0.1),
+ ))
+
+ formats = formats_female.copy()
+ formats.update(formats_male)
# Source: uk.wikipedia.org/wiki/Українські_імена
first_names_male = (
|
Differentiate Ukrainian female and male names
|
py
|
diff --git a/examples/basic_usage.py b/examples/basic_usage.py
index <HASH>..<HASH> 100644
--- a/examples/basic_usage.py
+++ b/examples/basic_usage.py
@@ -22,6 +22,4 @@ algo = SVD()
# Evaluate performances of our algorithm on the dataset.
perf = evaluate(algo, data, measures=['RMSE', 'MAE'])
-print(algo.qi)
-
print_perf(perf)
|
Removed useless line in basic_usage.py
|
py
|
diff --git a/gct.py b/gct.py
index <HASH>..<HASH> 100644
--- a/gct.py
+++ b/gct.py
@@ -47,8 +47,17 @@ class GCT(object):
raise ImportError('pandas is required to work with a GCT file in a DataFrame.' +
' Try: pip install pandas')
+ # Check to see if gct_io if a GPFile object from the GenePattern Python Client, if installed
+ try:
+ import gp
+ if isinstance(gct_obj, gp.GPFile):
+ gct_io = gct_obj.open()
+ except ImportError:
+ pass
+
# Check to see if gct_obj is a file-like object
- if hasattr(gct_obj, 'read'):
+ # Skip if a file-like object has already been obtained
+ if hasattr(gct_obj, 'read') and gct_io is None:
gct_io = gct_obj
# Check to see if gct_obj is a string
|
Add handling of GPFile objects in gct.py
|
py
|
diff --git a/ckanutils/api.py b/ckanutils/api.py
index <HASH>..<HASH> 100644
--- a/ckanutils/api.py
+++ b/ckanutils/api.py
@@ -318,7 +318,12 @@ class CKAN(object):
print('Downloading resource %s...' % resource_id)
if p.isdir(filepath):
- filepath = p.join(filepath, p.basename(url))
+ basename = p.basename(url)
+
+ if basename == 'export?format=csv':
+ basename = '%s.csv' % resource_id
+
+ filepath = p.join(filepath, basename)
headers = {'User-Agent': user_agent}
r = requests.get(url, stream=True, headers=headers)
|
Properly assign file names for google doc exports
|
py
|
diff --git a/tests/utils/notebook_test/__init__.py b/tests/utils/notebook_test/__init__.py
index <HASH>..<HASH> 100644
--- a/tests/utils/notebook_test/__init__.py
+++ b/tests/utils/notebook_test/__init__.py
@@ -111,7 +111,7 @@ def run_notebook(notebook, notebook_dir, kernel=None, no_cache=False, temp_dir='
nbformat.write(notebook, output_file)
output_nb = io.open(output_file, mode='r', encoding='utf-8')
for line in output_nb:
- if "Warning:" in line:
+ if "Warning:" in line and "numpy operator signatures" not in line:
errors.append("Warning:\n" + line)
if len(errors) > 0:
logging.error('\n'.join(errors))
|
Fix Nightly Tests for Binaries (#<I>) * copy missing requirements file * not treating numpy warnings as errors * updating six package version * Revert "updating six package version" This reverts commit <I>c5d<I>aace<I>f7bb6f<I>.
|
py
|
diff --git a/doc/test_messages_documentation.py b/doc/test_messages_documentation.py
index <HASH>..<HASH> 100644
--- a/doc/test_messages_documentation.py
+++ b/doc/test_messages_documentation.py
@@ -156,6 +156,8 @@ class LintModuleTest:
assert expected_messages == actual_messages
def assert_message_good(self, actual_messages: MessageCounter) -> str:
+ if not actual_messages:
+ return ""
messages = "\n- ".join(f"{v} (l. {i})" for i, v in actual_messages)
msg = f"""There should be no warning raised for 'good.py' but these messages were raised:
- {messages}
|
[test documentation] Bypass the case where everything went fine. We're doing that a lot; it adds up.
|
py
|
diff --git a/nanomath/nanomath.py b/nanomath/nanomath.py
index <HASH>..<HASH> 100644
--- a/nanomath/nanomath.py
+++ b/nanomath/nanomath.py
@@ -27,6 +27,11 @@ from math import log
class Stats(object):
def __init__(self, df):
+ if len(df) < 5:
+ sys.stderr.write("\n\nWARNING: less than 5 reads in the dataset!\n")
+ sys.stderr.write("WARNING: some stats might be unexpected or missing\n")
+ sys.stderr.write("WARNING: or a crash might happen, who knows\n")
+ sys.stderr.write("WARNING: this code is not intended for such small datasets\n\n\n")
self.number_of_reads = len(df)
self.number_of_bases = np.sum(df["lengths"])
self._with_readIDs = "readIDs" in df
|
print a warning for datasets that are too small, intended for cases like <URL>
|
py
|
diff --git a/tests/is_element_present.py b/tests/is_element_present.py
index <HASH>..<HASH> 100644
--- a/tests/is_element_present.py
+++ b/tests/is_element_present.py
@@ -1,6 +1,7 @@
+from nose.tools import assert_true, assert_false, assert_equals
+
import warnings
-from nose.tools import assert_true, assert_false, assert_equals
class IsElementPresentTest(object):
|
fixed pep8 issues in test/is_element_present.py
|
py
|
diff --git a/master/buildbot/worker/docker.py b/master/buildbot/worker/docker.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/worker/docker.py
+++ b/master/buildbot/worker/docker.py
@@ -292,7 +292,7 @@ class DockerLatentWorker(CompatibleLatentWorkerMixin,
host_conf = self.hostconfig.copy()
host_conf['binds'] = binds
if docker_py_version >= 2.2:
- host_conf['init'] = True
+ host_conf['init'] = host_conf.get('init', True)
host_conf = docker_client.create_host_config(**host_conf)
instance = docker_client.create_container(
|
allow user to set init in host_conf for docker
|
py
|
diff --git a/superset/db_engine_specs.py b/superset/db_engine_specs.py
index <HASH>..<HASH> 100644
--- a/superset/db_engine_specs.py
+++ b/superset/db_engine_specs.py
@@ -139,6 +139,9 @@ class MySQLEngineSpec(BaseEngineSpec):
"+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER"),
Grain("year", _('year'), "DATE(DATE_SUB({col}, "
"INTERVAL DAYOFYEAR({col}) - 1 DAY))"),
+ Grain("week_start_monday", _('week_start_monday'),
+ "DATE(DATE_SUB({col}, "
+ "INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))"),
)
@classmethod
|
Week beginning Monday time grain for MySQL (#<I>)
|
py
|
diff --git a/smartcard/pcsc/PCSCCardConnection.py b/smartcard/pcsc/PCSCCardConnection.py
index <HASH>..<HASH> 100644
--- a/smartcard/pcsc/PCSCCardConnection.py
+++ b/smartcard/pcsc/PCSCCardConnection.py
@@ -126,10 +126,17 @@ class PCSCCardConnection(CardConnection):
'Unable to connect with protocol: ' + \
dictProtocol[pcscprotocol] + '. ' + \
SCardGetErrorMessage(hresult))
+
protocol = 0
- for p in dictProtocol:
- if p == dwActiveProtocol:
- protocol = eval("CardConnection.%s_protocol" % dictProtocol[p])
+ if dwActiveProtocol == SCARD_PROTOCOL_T0 | SCARD_PROTOCOL_T1:
+ # special case for T0 | T1
+ # this happen when mode=SCARD_SHARE_DIRECT and no protocol is
+ # then negociated with the card
+ protocol = CardConnection.T0_protocol | CardConnection.T1_protocol
+ else:
+ for p in dictProtocol:
+ if p == dwActiveProtocol:
+ protocol = eval("CardConnection.%s_protocol" % dictProtocol[p])
PCSCCardConnection.setProtocol(self, protocol)
def disconnect(self):
|
PCSCCardConnection: special case for T0 | T1. When mode=SCARD_SHARE_DIRECT, no protocol is negotiated with the card and dwActiveProtocol is set to SCARD_PROTOCOL_T0 | SCARD_PROTOCOL_T1, and "CardConnection.T0 or T1_protocol" is not a valid value.
|
py
|
diff --git a/twitcher/owsproxy.py b/twitcher/owsproxy.py
index <HASH>..<HASH> 100644
--- a/twitcher/owsproxy.py
+++ b/twitcher/owsproxy.py
@@ -31,6 +31,8 @@ allowed_content_types = (
"image/png;mode=32bit",
"image/gif", # GIF
"image/jpeg", # JPEG
+ "application/json", # JSON
+ "application/json;charset=ISO-8859-1",
)
@@ -66,10 +68,9 @@ def _send_request(request, service):
content = None
try:
- if ct:
- content = resp.content.decode('utf-8', 'ignore')
- # replace urls in xml content
- if ct in ['text/xml', 'application/xml']:
+ if ct in ['text/xml', 'application/xml']:
+ # replace urls in xml content
+ content = resp.content.decode('utf-8', 'ignore')
content = content.replace(service['url'], proxy_url(request, service['name']))
else:
# raw content
|
fixed content value ... added json content-type
|
py
|
diff --git a/hex/utils/asc2hasc.py b/hex/utils/asc2hasc.py
index <HASH>..<HASH> 100644
--- a/hex/utils/asc2hasc.py
+++ b/hex/utils/asc2hasc.py
@@ -65,11 +65,11 @@ print("Calcs:" +
hexGrid = HASC()
hexGrid.init(hexCols, hexRows, esriGrid.xll, esriGrid.yll, hexSide, esriGrid.nodata)
-for j in range(hexGrid.ncols):
- for i in range(hexGrid.nrows):
+for j in range(hexGrid.nrows):
+ for i in range(hexGrid.ncols):
x = esriGrid.xll + i * 3 * hexSide / 2
y = esriGrid.yll + j * 2 * hexPerp + (j % 2) * hexPerp
- hexGrid.set(i, j, esriGrid.getNearestNEighbour(x, y))
+ hexGrid.set(i, j, esriGrid.getNearestNeighbour(x, y))
print ("Done!")
|
Fixed indexing in main loop in ASC2HASC.
|
py
|
diff --git a/kurt/fixed_objects.py b/kurt/fixed_objects.py
index <HASH>..<HASH> 100644
--- a/kurt/fixed_objects.py
+++ b/kurt/fixed_objects.py
@@ -325,6 +325,9 @@ class Point(FixedObject):
def value(self):
return (self.x, self.y)
+ def __iter__(self):
+ return iter(self.value)
+
def __repr__(self):
return 'Point(%r, %r)' % self.value
|
Make Point iterable so (x, y) = point works
|
py
|
diff --git a/flask_io/permissions.py b/flask_io/permissions.py
index <HASH>..<HASH> 100644
--- a/flask_io/permissions.py
+++ b/flask_io/permissions.py
@@ -16,10 +16,3 @@ class Permission(metaclass=ABCMeta):
Return `True` if permission is granted, `False` otherwise.
"""
pass
-
- @abstractmethod
- def has_object_permission(self, obj):
- """
- Return `True` if permission is granted, `False` otherwise.
- """
- pass
|
Remove method has_object_permission
|
py
|
diff --git a/salt/master.py b/salt/master.py
index <HASH>..<HASH> 100644
--- a/salt/master.py
+++ b/salt/master.py
@@ -1185,7 +1185,7 @@ class AESFuncs(object):
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
- if 'load' in load:
+ if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
@@ -1194,6 +1194,10 @@ class AESFuncs(object):
ret = {'jid': load['jid'],
'id': key,
'return': item}
+ if 'fun' in load:
+ ret['fun'] = load['fun']
+ if 'arg' in load:
+ ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
|
pass thru fun and fun_args if they exist
|
py
|
diff --git a/netmiko/__init__.py b/netmiko/__init__.py
index <HASH>..<HASH> 100644
--- a/netmiko/__init__.py
+++ b/netmiko/__init__.py
@@ -31,7 +31,7 @@ from netmiko.scp_functions import file_transfer, progress_bar
# Alternate naming
Netmiko = ConnectHandler
-__version__ = "4.0.1a1"
+__version__ = "4.1.0"
__all__ = (
"ConnectHandler",
"ConnLogOnly",
|
Roll version to <I> (#<I>)
|
py
|
diff --git a/artifactory.py b/artifactory.py
index <HASH>..<HASH> 100755
--- a/artifactory.py
+++ b/artifactory.py
@@ -838,5 +838,6 @@ class ArtifactoryPath(pathlib.Path, PureArtifactoryPath):
yield top, dirs, nondirs
for name in dirs:
new_path = top.joinpath(top, name)
- yield from self.walk(new_path)
+ for x in self.walk(new_path):
+ yield x
|
Use Python 2.x syntax (instead of yield from) for compatibility.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,7 @@ try:
"sphinx-me >= 0.1.2",
"unidecode",
"django-email-extras >= 0.2",
- "django >= 1.8, < 1.11",
+ "django >= 1.8",
"future <= 0.15.0",
],
classifiers = [
|
Removed upper Django requirement from setup.py
|
py
|
diff --git a/skew/resources/aws/s3.py b/skew/resources/aws/s3.py
index <HASH>..<HASH> 100644
--- a/skew/resources/aws/s3.py
+++ b/skew/resources/aws/s3.py
@@ -13,10 +13,32 @@
import jmespath
from skew.resources.aws import AWSResource
+import skew.awsclient
class Bucket(AWSResource):
+ @classmethod
+ def enumerate(cls, arn, region, account, resource_id=None):
+ resources = super(Bucket, cls).enumerate(arn, region, account,
+ resource_id)
+ client = skew.awsclient.get_awsclient(
+ cls.Meta.service, region, account)
+ region_resources = []
+ if region is None:
+ region = 'us-east-1'
+ for r in resources:
+ kwargs = {'Bucket': r.id}
+ response = client.call('get_bucket_location', **kwargs)
+ location = response.get('LocationConstraint', 'us-east-1')
+ if location is None:
+ location = 'us-east-1'
+ if location is 'EU':
+ location = 'eu-west-1'
+ if location == region:
+ region_resources.append(r)
+ return region_resources
+
class Meta(object):
service = 's3'
type = 'bucket'
|
When enumerating S3 buckets for a given region, only include the bucket if its location constraint is set to that region. This, unfortunately, requires an API call for each bucket, but I can't think of a better way to fix this. Fixes #<I>.
|
py
|
diff --git a/simpleai/search/local.py b/simpleai/search/local.py
index <HASH>..<HASH> 100644
--- a/simpleai/search/local.py
+++ b/simpleai/search/local.py
@@ -47,12 +47,15 @@ def _first_expander(fringe, iteration, viewer):
'''
Expander that expands only the first node on the fringe.
'''
- if viewer: viewer.chosen_node(fringe[0])
+ current = fringe[0]
+ neighbors = current.expand(local_search=True)
+
+ if viewer:
+ viewer.chosen_node(current)
+ viewer.expanded([current], [neighbors])
- neighbors = fringe[0].expand(local_search=True)
fringe.extend(neighbors)
- if viewer: viewer.expanded([fringe[0]], [neighbors])
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None):
|
Fixed problem with first_expander and the fringe references
|
py
|
diff --git a/formats/folia.py b/formats/folia.py
index <HASH>..<HASH> 100644
--- a/formats/folia.py
+++ b/formats/folia.py
@@ -6374,9 +6374,12 @@ def relaxng(filename=None):
name='metadata',
#ns=NSFOLIA,
),
- E.oneOrMore(
+ E.zeroOrMore(
E.ref(name='text'),
),
+ E.zeroOrMore(
+ E.ref(name='speech'),
+ ),
name='FoLiA',
ns = NSFOLIA
) ),
|
RelaxNG schema fix, root element speech was missing
|
py
|
diff --git a/bedup/tracking.py b/bedup/tracking.py
index <HASH>..<HASH> 100644
--- a/bedup/tracking.py
+++ b/bedup/tracking.py
@@ -364,7 +364,7 @@ def dedup_tracked(sess, volset, tt):
tt.format('{elapsed} Extent map {comm2:counter}/{comm2:total}')
tt.set_total(comm2=le)
for comm2 in query:
- space_gain2 += comm2.size * (len(comm2.inodes) - 1)
+ space_gain2 += comm2.size * (comm2.inode_count - 1)
tt.update(comm2=comm2)
for inode in comm2.inodes:
try:
@@ -394,7 +394,8 @@ def dedup_tracked(sess, volset, tt):
ofile_reserved = 7 + len(volset)
for comm3 in query:
- count3 = len(comm3.inodes)
+ assert comm3.fiemap_count == len(comm3.inodes)
+ count3 = comm3.fiemap_count
space_gain3 += comm3.size * (count3 - 1)
tt.update(comm3=comm3)
files = []
|
Use precomputed commonality counts whenever possible.
|
py
|
diff --git a/pystatsd/statsd.py b/pystatsd/statsd.py
index <HASH>..<HASH> 100644
--- a/pystatsd/statsd.py
+++ b/pystatsd/statsd.py
@@ -22,6 +22,7 @@ class Client(object):
self.host = host
self.port = port
self.log = logging.getLogger("pystatsd.client")
+ self.udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def timing(self, stat, time, sample_rate=1):
"""
@@ -76,11 +77,10 @@ class Client(object):
else:
sampled_data=data
- udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
for stat, value in sampled_data.iteritems():
send_data = "%s:%s" % (stat, value)
- udp_sock.sendto(send_data, addr)
+ self.udp_sock.sendto(send_data, addr)
except:
self.log.exception("unexpected error")
- pass # we don't care
\ No newline at end of file
+ pass # we don't care
|
create udp socket at initialization time, instead of creating a new socket on every send.
|
py
|
diff --git a/jeni.py b/jeni.py
index <HASH>..<HASH> 100644
--- a/jeni.py
+++ b/jeni.py
@@ -4,7 +4,7 @@
"""``jeni`` injects annotated dependencies"""
-__version__ = '0.3.7'
+__version__ = '0.3.8-dev'
import abc
import collections
|
HEAD is <I>.
|
py
|
diff --git a/trimesh/scene/scene.py b/trimesh/scene/scene.py
index <HASH>..<HASH> 100644
--- a/trimesh/scene/scene.py
+++ b/trimesh/scene/scene.py
@@ -478,7 +478,7 @@ class Scene:
Parameters
-----------
- resultion: (2,) int, resolution to render image
+ resolution: (2,) int, resolution to render image
**kwargs: passed to SceneViewer constructor
Returns
|
Fixed doc typo in trimesh/scene/scene.py
|
py
|
diff --git a/_pytest/terminal.py b/_pytest/terminal.py
index <HASH>..<HASH> 100644
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -22,7 +22,7 @@ def pytest_addoption(parser):
group._addoption('-r',
action="store", dest="reportchars", default=None, metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
- "(E)error, (s)skipped, (x)failed, (X)passed (w)warnings (a)all.")
+ "(E)error, (s)skipped, (x)failed, (X)passed (w)pytest-warnings (a)all.")
group._addoption('-l', '--showlocals',
action="store_true", dest="showlocals", default=False,
help="show locals in tracebacks (disabled by default).")
|
(w)warnings -> (w)pytest-warnings in "-r chars" help
|
py
|
diff --git a/tests/func/test_api.py b/tests/func/test_api.py
index <HASH>..<HASH> 100644
--- a/tests/func/test_api.py
+++ b/tests/func/test_api.py
@@ -1,5 +1,3 @@
-from __future__ import unicode_literals
-
import os
import shutil
import copy
|
py2: fix problem with ruamel.yaml and unicode
|
py
|
diff --git a/ibis/backends/tests/base.py b/ibis/backends/tests/base.py
index <HASH>..<HASH> 100644
--- a/ibis/backends/tests/base.py
+++ b/ibis/backends/tests/base.py
@@ -186,9 +186,11 @@ def get_common_spark_testing_client(data_directory, connect):
import pyspark.sql.types as pt
from pyspark.sql import SparkSession
- spark = SparkSession.builder.config(
- 'spark.default.parallelism', 4
- ).getOrCreate()
+ spark = (
+ SparkSession.builder.config('spark.default.parallelism', 4)
+ .config('spark.driver.bindAddress', '127.0.0.1')
+ .getOrCreate()
+ )
_spark_testing_client = connect(spark)
s = _spark_testing_client._session
num_partitions = 4
|
CI: Fixing Spark tests by settings the ip address to bind (#<I>)
|
py
|
diff --git a/ajax/endpoints.py b/ajax/endpoints.py
index <HASH>..<HASH> 100644
--- a/ajax/endpoints.py
+++ b/ajax/endpoints.py
@@ -11,12 +11,6 @@ from ajax.exceptions import AJAXError, AlreadyRegistered, NotRegistered, \
class BaseEndpoint(object):
- def __init__(self, application, model, method, pk):
- self.application = application
- self.model = model
- self.method = method
- self.pk = pk
-
def _encode_data(self, data):
"""Encode a ``QuerySet`` to a Python dict.
@@ -68,7 +62,16 @@ class BaseEndpoint(object):
return data
-class ModelEndpoint(BaseEndpoint):
+class BaseModelFormEndpoint(BaseEndpoint):
+ def __init__(self, application, model, method, pk):
+ self.application = application
+ self.model = model
+ self.method = method
+ self.pk = pk
+
+
+class ModelEndpoint(BaseModelFormEndpoint):
+
def create(self, request):
record = self.model(**self._extract_data(request))
if self.can_create(request.user, record):
@@ -176,7 +179,7 @@ class ModelEndpoint(BaseEndpoint):
return False
-class FormEndpoint(BaseEndpoint):
+class FormEndpoint(BaseModelFormEndpoint):
"""AJAX endpoint for processing Django forms.
The models and forms are processed in pretty much the same manner, only a
|
Rejiggered class hierarchy.
|
py
|
diff --git a/ait/core/val.py b/ait/core/val.py
index <HASH>..<HASH> 100644
--- a/ait/core/val.py
+++ b/ait/core/val.py
@@ -388,7 +388,7 @@ class Validator(object):
# Loop through the errors (if any) and set valid = False if any are found
# Display the error message
- for error in sorted(v.iter_errors(data)):
+ for error in v.iter_errors(data):
msg = "Schema-based validation failed for YAML file '" + self._ymlfile + "'"
self.ehandler.process(docnum, self._ymlproc.doclines, error, messages)
valid = False
|
Issue #<I> - Fix validation error message sorting issue. Remove sorting of validation error messages to resolve a TypeError. Note, we could implement our own exception here and address this issue, but I haven't noticed a problem with not sorting the messages. Resolves #<I>
|
py
|
diff --git a/uncompyle6/semantics/transform.py b/uncompyle6/semantics/transform.py
index <HASH>..<HASH> 100644
--- a/uncompyle6/semantics/transform.py
+++ b/uncompyle6/semantics/transform.py
@@ -115,11 +115,14 @@ class TreeTransform(GenericASTTraversal, object):
call = expr[0]
LOAD_ASSERT = call[0]
- expr = call[1][0]
- node = SyntaxTree(
- kind,
- [assert_expr, jump_cond, LOAD_ASSERT, expr, RAISE_VARARGS_1]
- )
+ if isinstance(call[1], SyntaxTree):
+ expr = call[1][0]
+ node = SyntaxTree(
+ kind,
+ [assert_expr, jump_cond, LOAD_ASSERT, expr, RAISE_VARARGS_1]
+ )
+ pass
+ pass
else:
# ifstmt
# 0. testexpr (2)
|
Guard against improper assert transform... we see this happen in getheader() from <I>/lib/python<I>/http/client.pyc
|
py
|
diff --git a/tests/test_web_functional.py b/tests/test_web_functional.py
index <HASH>..<HASH> 100644
--- a/tests/test_web_functional.py
+++ b/tests/test_web_functional.py
@@ -186,8 +186,15 @@ class TestWebFunctional(unittest.TestCase):
def handler(request):
data = yield from request.post()
files = data.getall('file')
+ _file_names = []
for _file in files:
- self.assertEqual(_file.file.closed, False)
+ self.assertFalse(_file.file.closed)
+ if _file.filename == 'test1.jpeg':
+ self.assertEqual(_file.file.read(), b'binary data 1')
+ if _file.filename == 'test2.jpeg':
+ self.assertEqual(_file.file.read(), b'binary data 2')
+ _file_names.append(_file.filename)
+ self.assertCountEqual(_file_names, ['test1.jpeg', 'test2.jpeg'])
resp = web.Response(body=b'OK')
return resp
|
updated test case as per asvetlov feedback
|
py
|
diff --git a/lib/retester/retester.py b/lib/retester/retester.py
index <HASH>..<HASH> 100644
--- a/lib/retester/retester.py
+++ b/lib/retester/retester.py
@@ -79,7 +79,7 @@ class Result(object):
assert result in ["pass", "fail"]
- self.running_time = start_time - time.clock()
+ self.running_time = time.clock() - start_time
if result == "pass":
self.result = "pass"
|
Made retester running_time a positive value instead of negative...
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@ setup(
'Programming Language :: Python :: 3.4'
],
test_suite='nolds.test_measures',
- install_requires=['numpy'],
+ install_requires=['numpy >=1.5', 'future >=0.8'],
extras_require={
'RANSAC': 'sklearn >=0.17'
},
|
uses concrete version numbers for requires (hopefully they are correct)
|
py
|
diff --git a/spyderlib/plugins/ipythonconsole.py b/spyderlib/plugins/ipythonconsole.py
index <HASH>..<HASH> 100644
--- a/spyderlib/plugins/ipythonconsole.py
+++ b/spyderlib/plugins/ipythonconsole.py
@@ -832,6 +832,7 @@ class IPythonConsole(SpyderPluginWidget):
kernel_manager = self.ipython_app.create_kernel_manager(connection_file)
shellwidget.ipython_widget.kernel_manager = kernel_manager
shellwidget.kernel_widget_id = kernel_widget_id
+ shellwidget.get_control().setFocus()
# Rename client tab
client_widget_id = id(shellwidget)
|
IPython Console: Focus client again after kernel restart
|
py
|
diff --git a/workbench/workers/rekall_adapter/rekall_adapter.py b/workbench/workers/rekall_adapter/rekall_adapter.py
index <HASH>..<HASH> 100644
--- a/workbench/workers/rekall_adapter/rekall_adapter.py
+++ b/workbench/workers/rekall_adapter/rekall_adapter.py
@@ -204,8 +204,6 @@ class WorkbenchRenderer(BaseRenderer):
# Unit test: Create the class, the proper input and run the execute() method for a test
-import pytest
[email protected]
def test():
"""rekall_adapter.py: Test."""
|
okay starting to unmark the rekall workers; going to dig into WTF is happening with test crashes on Travis
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -78,6 +78,7 @@ additions for Django projects. See the project page for more information:
license = 'New BSD License',
platforms = ['any'],
packages = packages,
+ cmdclass = cmdclasses,
data_files = data_files,
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Web Environment',
|
Added cmdclass to the setup parameters to fix the data_files installation error.
|
py
|
diff --git a/cycy/repl.py b/cycy/repl.py
index <HASH>..<HASH> 100644
--- a/cycy/repl.py
+++ b/cycy/repl.py
@@ -47,12 +47,25 @@ class REPL(object):
elif not repl_input.strip():
continue
- if repl_input.startswith("dump "):
- repl_input = repl_input[5:]
- newly_compiled_functions = self.interpreter.compile(repl_input)
- for function_name in newly_compiled_functions:
- self.dump(function_name)
- self.stdout.write("\n")
+ if repl_input.startswith("##"):
+ command_and_argument = repl_input[2:].strip().split(" ", 1)
+ command = command_and_argument.pop(0)
+ if command_and_argument:
+ repl_input = command_and_argument.pop()
+ else:
+ repl_input = ""
+
+ if command == "dump":
+ new_functions = self.interpreter.compile(repl_input)
+ for function_name in new_functions:
+ self.dump(function_name)
+ self.stdout.write("\n")
+ elif command == "compile":
+ source_file = streamio.open_file_as_stream(repl_input)
+ self.interpreter.compile(source_file.readall())
+ source_file.close()
+ else:
+ self.stderr.write("Unknown command: '%s'\n" % (command,))
else:
self.interpret(repl_input)
self.stdout.write("\n")
|
Add ##compile, which doesn't work and probably has the wrong name, but...
|
py
|
diff --git a/salt/returners/rawfile_json.py b/salt/returners/rawfile_json.py
index <HASH>..<HASH> 100644
--- a/salt/returners/rawfile_json.py
+++ b/salt/returners/rawfile_json.py
@@ -55,7 +55,7 @@ def returner(ret):
'''
Write the return data to a file on the minion.
'''
- opts = _get_options({}) # Pass in empty ret, since this is a list of events
+ opts = _get_options(ret)
try:
with salt.utils.files.flopen(opts['filename'], 'a') as logfile:
salt.utils.json.dump(ret, logfile)
|
Fix applying of attributes for returner rawfile_json. Arguments are not getting applied to the rawfile_json returner. For example, if you specify an alternate filename for the output, the default "/var/log/salt/events" is always used. Passing the `ret` to `_get_options(ret)` resolves this.
|
py
|
diff --git a/django_rq/tests/tests.py b/django_rq/tests/tests.py
index <HASH>..<HASH> 100644
--- a/django_rq/tests/tests.py
+++ b/django_rq/tests/tests.py
@@ -15,7 +15,7 @@ from django_rq.workers import get_worker
try:
from rq_scheduler import Scheduler
- from .queues import get_scheduler
+ from ..queues import get_scheduler
RQ_SCHEDULER_INSTALLED = True
except ImportError:
RQ_SCHEDULER_INSTALLED = False
|
Fixed an import error that causes scheduler tests to not run.
|
py
|
diff --git a/hwt/simulator/simTestCase.py b/hwt/simulator/simTestCase.py
index <HASH>..<HASH> 100644
--- a/hwt/simulator/simTestCase.py
+++ b/hwt/simulator/simTestCase.py
@@ -15,9 +15,13 @@ from hwt.simulator.shortcuts import simPrepare
from hwt.simulator.simSignal import SimSignal
from hwt.simulator.utils import agent_randomize
from hwt.simulator.vcdHdlSimConfig import VcdHdlSimConfig
+from hwt.hdlObjects.types.arrayVal import ArrayVal
def allValuesToInts(sequenceOrVal):
+ if isinstance(sequenceOrVal, ArrayVal):
+ sequenceOrVal = sequenceOrVal.val
+
if isinstance(sequenceOrVal, Value):
return valToInt(sequenceOrVal)
elif not sequenceOrVal:
|
assertValSequenceEqual for ArrayValues as well
|
py
|
diff --git a/src/jukeboxmaya/mayaplugins/jbscene.py b/src/jukeboxmaya/mayaplugins/jbscene.py
index <HASH>..<HASH> 100644
--- a/src/jukeboxmaya/mayaplugins/jbscene.py
+++ b/src/jukeboxmaya/mayaplugins/jbscene.py
@@ -57,7 +57,7 @@ def get_current_scene_node():
:rtype: str | None
:raises: None
"""
- c = cmds.namespaceInfo(':', listOnlyDependencyNodes=True, absoluteName=True)
+ c = cmds.namespaceInfo(':', listOnlyDependencyNodes=True, absoluteName=True, dagPath=True)
l = cmds.ls(c, type='jb_sceneNode', absoluteName=True)
if not l:
return
|
Fix get current scene node. Now uses unique dag paths.
|
py
|
diff --git a/utils.py b/utils.py
index <HASH>..<HASH> 100644
--- a/utils.py
+++ b/utils.py
@@ -32,7 +32,7 @@ import logging
import shlex
import six
from flask import has_app_context, current_app
-from functools import partial
+from functools import partial, wraps
from werkzeug.utils import import_string, find_modules
@@ -320,3 +320,29 @@ def run_py_func(manager_run, command_line, passthrough=False):
sys.argv = sys_argv_orig
return namedtuple('Res', ('out', 'err', 'exit_code'))(out, err, exit_code)
+
+
+def toposort_depends(*dependencies):
+ """Set topological dependencies via decorator."""
+ def decorator(wrapped):
+ wrapped.__toposort_dependencies = set(dependencies)
+ return wrapped
+ return decorator
+
+
+def toposort_extract(wrapped):
+ """Extract topological dependencies."""
+ return getattr(wrapped, '__toposort_dependencies', set())
+
+
+def toposort_send(signal, sender, **kwargs):
+ """Send signal in topological order to all connected receivers."""
+ from toposort import toposort_flatten
+ if not signal.receivers:
+ return []
+ else:
+ return [(receiver, receiver(sender, **kwargs))
+ for receiver in toposort_flatten({
+ receiver: toposort_extract(receiver)
+ for receiver in signal.receivers_for(sender)
+ })]
|
global: toposort utility addition * NEW Adds decorator `toposort_depends` for specifying dependencies and `toposort_send` for sending signal to receivers in topological order.
|
py
|
diff --git a/pyang/translators/yang.py b/pyang/translators/yang.py
index <HASH>..<HASH> 100644
--- a/pyang/translators/yang.py
+++ b/pyang/translators/yang.py
@@ -106,6 +106,10 @@ _kwd_class = {
'feature': 'defs',
'extension': 'defs',
'_comment': 'comment',
+ 'augment': 'augment',
+ 'rpc': 'rpc',
+ 'notification': 'notification',
+ 'deviation': 'deviation',
'module': None,
'submodule': None,
}
|
fix whitespace between data defs and augment/rpc/notif etc
|
py
|
diff --git a/hatemile/util/beautifulsoup/beautifulsouphtmldomparser.py b/hatemile/util/beautifulsoup/beautifulsouphtmldomparser.py
index <HASH>..<HASH> 100644
--- a/hatemile/util/beautifulsoup/beautifulsouphtmldomparser.py
+++ b/hatemile/util/beautifulsoup/beautifulsouphtmldomparser.py
@@ -101,16 +101,16 @@ class BeautifulSoupHTMLDOMParser(HTMLDOMParser):
return self
def find_children(self, selector):
+ self.results = []
last_results = self.results
if isinstance(selector, BeautifulSoupHTMLDOMElement):
for result in last_results:
if self._in_list(result.children, selector):
- self.results[selector.get_data()]
+ self.results.append(selector.get_data())
break
else:
selector = re.sub('data-', 'dataaaaaa', selector)
selectors = re.split(',', selector)
- self.results = []
for sel in selectors:
for last_result in last_results:
results = last_result.select(sel)
|
:bug: Fix error finding the child of results
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,7 @@ setup(
description="Python REPL build on top of prompt_toolkit",
long_description=long_description,
packages=find_packages("."),
+ package_data={"ptpython": ["py.typed"]},
install_requires=[
"appdirs",
"importlib_metadata;python_version<'3.8'",
|
Added py.typed to package_data in setup.py
|
py
|
diff --git a/source/rafcon/gui/config.py b/source/rafcon/gui/config.py
index <HASH>..<HASH> 100644
--- a/source/rafcon/gui/config.py
+++ b/source/rafcon/gui/config.py
@@ -64,6 +64,9 @@ class GuiConfig(ObservableConfig):
config_file = CONFIG_FILE
super(GuiConfig, self).load(config_file, path)
+ self.configure_gtk()
+ self.configure_colors()
+
# fill up shortcuts
if not using_default_config:
default_gui_config = yaml.load(self.default_config) if self.default_config else {}
|
fix(gui_config): Configure GTK and colors after loading a new config. Fixes the issue that the theme cannot be set.
|
py
|
diff --git a/test.py b/test.py
index <HASH>..<HASH> 100755
--- a/test.py
+++ b/test.py
@@ -545,6 +545,9 @@ class SquareSetTestCase(unittest.TestCase):
self.assertFalse(a1 == b1)
self.assertFalse(a2 == b2)
+ self.assertEqual(chess.SquareSet(chess.BB_ALL), chess.BB_ALL)
+ self.assertEqual(chess.BB_ALL, chess.SquareSet(chess.BB_ALL))
+
class PolyglotTestCase(unittest.TestCase):
|
Test: Allow comparing square sets to ints
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup
setup(
name='monolithe',
- packages=['monolithe', 'monolithe.lib', 'monolithe.specsvalidator'],
+ packages=['monolithe', 'monolithe.lib', 'monolithe.generators', 'monolithe.validators', 'monolithe.courgette'],
include_package_data=True,
version='0.0.1',
description='Monolithe is the generator of all documentation and SDK for Nuage Network VSP',
|
Add new package names in setup.py
|
py
|
diff --git a/docker/rest/settings.py b/docker/rest/settings.py
index <HASH>..<HASH> 100644
--- a/docker/rest/settings.py
+++ b/docker/rest/settings.py
@@ -27,7 +27,7 @@ KAFKA_CONSUMER_AUTO_COMMIT_ENABLE = True
KAFKA_CONSUMER_FETCH_MESSAGE_MAX_BYTES = 10 * 1024 * 1024 # 10MB
KAFKA_CONSUMER_SLEEP_TIME = 1
-KAFKA_INCOMING_TOPIC = os.getenv('KAFKA_INCOMING_TOPIC', 'demo.incoming')
+KAFKA_PRODUCER_TOPIC = os.getenv('KAFKA_PRODUCER_TOPIC', 'demo.incoming')
KAFKA_PRODUCER_BATCH_LINGER_MS = 25 # 25 ms before flush
KAFKA_PRODUCER_BUFFER_BYTES = 4 * 1024 * 1024 # 4MB before blocking
|
Fix REST-Kafka topic variable name in docker settings.py (#<I>)
|
py
|
diff --git a/tests/test_manhole.py b/tests/test_manhole.py
index <HASH>..<HASH> 100644
--- a/tests/test_manhole.py
+++ b/tests/test_manhole.py
@@ -1,5 +1,6 @@
import unittest
import os
+import select
import sys
import subprocess
import traceback
@@ -213,11 +214,13 @@ class ManholeTestCase(unittest.TestCase):
self._wait_for_strings(proc.read, 1,
'from PID:%s UID:%s' % (os.getpid(), os.getuid()),
)
- sock.send("exit()\n")
- sock.shutdown(socket.SHUT_RDWR)
+ sock.shutdown(socket.SHUT_WR)
+ select.select([sock], [], [], 5)
+ sock.recv(1024)
+ sock.shutdown(socket.SHUT_RD)
sock.close()
self._wait_for_strings(proc.read, 1,
- 'DONE.'
+ 'DONE.',
'Cleaning up.',
'Waiting for new connection'
)
@@ -292,8 +295,9 @@ def maybe_enable_coverage():
@atexit.register
def cleanup():
- cov.stop()
- cov.save()
+ if cov:
+ cov.stop()
+ cov.save()
def monkeypatch(mod, what):
|
Add a test for graceful disconnect.
|
py
|
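The shutdown dance in the test is easier to see in isolation. A minimal, self-contained sketch using a local socketpair (no manhole or real server involved; payloads are illustrative): shutting down only the write side signals EOF to the peer while our read side stays open to drain the final reply.

import select
import socket

a, b = socket.socketpair()            # stand-ins for the test client and manhole
a.sendall(b'exit()\n')
a.shutdown(socket.SHUT_WR)            # half-close: peer sees EOF, we can still read

print(b.recv(1024))                   # b'exit()\n'
print(b.recv(1024))                   # b'' -- EOF caused by the half-close
b.sendall(b'DONE.\n')
b.close()

select.select([a], [], [], 5)         # wait up to 5s for the reply, as the test does
print(a.recv(1024))                   # b'DONE.\n'
a.close()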
diff --git a/ryu/ofproto/ofproto_v1_3_parser.py b/ryu/ofproto/ofproto_v1_3_parser.py
index <HASH>..<HASH> 100644
--- a/ryu/ofproto/ofproto_v1_3_parser.py
+++ b/ryu/ofproto/ofproto_v1_3_parser.py
@@ -1405,9 +1405,9 @@ class OFPPortStatus(MsgBase):
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPPortStatus, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
- (msg.reason,) = struct.unpack_from(
+ msg.reason = struct.unpack_from(
ofproto_v1_3.OFP_PORT_STATUS_PACK_STR, msg.buf,
- ofproto_v1_3.OFP_HEADER_SIZE)
+ ofproto_v1_3.OFP_HEADER_SIZE)[0]
msg.desc = OFPPort.parser(msg.buf,
ofproto_v1_3.OFP_PORT_STATUS_DESC_OFFSET)
return msg
|
of<I>: fix OFPPortStatus parser. The same correction as for OF<I>.
|
py
|
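Both spellings pull the lone value out of the 1-tuple that struct.unpack_from returns; the change just matches the style of the other parser referenced in the commit message. A small self-contained check -- the format string and offset here are illustrative, not the real OFPPortStatus layout:

import struct

buf = struct.pack('!B7x', 42)                       # one byte of payload plus padding
(reason_a,) = struct.unpack_from('!B7x', buf, 0)    # tuple-unpacking form
reason_b = struct.unpack_from('!B7x', buf, 0)[0]    # indexing form used in the fix
assert reason_a == reason_b == 42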
diff --git a/ipyrad/assemble/clustmap_across.py b/ipyrad/assemble/clustmap_across.py
index <HASH>..<HASH> 100644
--- a/ipyrad/assemble/clustmap_across.py
+++ b/ipyrad/assemble/clustmap_across.py
@@ -310,6 +310,7 @@ class Step6:
# check for errors
print("")
+ rasync.wait()
if not rasync.successful():
raise IPyradError(rasync.exception())
|
wait for vsearch cluster to finish
|
py
|
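The same wait-before-inspect pattern with the stdlib, since ipyparallel's AsyncResult is modeled on multiprocessing's: successful() raises if the job has not finished yet, so wait() must come first. This is an analogue of the fix, not ipyrad's actual call path.

import time
from multiprocessing.pool import ThreadPool

pool = ThreadPool(1)
rasync = pool.apply_async(time.sleep, (0.1,))
rasync.wait()                  # block until the job is done
print(rasync.successful())     # True; calling this before wait() would raise
pool.close()
pool.join()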
diff --git a/crispy_forms/layout.py b/crispy_forms/layout.py
index <HASH>..<HASH> 100644
--- a/crispy_forms/layout.py
+++ b/crispy_forms/layout.py
@@ -171,7 +171,7 @@ class BaseInput(object):
def __init__(self, name, value, **kwargs):
self.name = name
self.value = value
- self.id = kwargs.get('css_id', '')
+ self.id = kwargs.pop('css_id', '')
self.attrs = {}
if 'css_class' in kwargs:
|
Pop "css_id" to stop it being added as a "css-id" attribute
|
py
|
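The difference is easy to reproduce: dict.get leaves the key in place, so css_id would later be merged into the widget attrs (and rendered as a css-id attribute, per the commit message), while pop consumes it. Hypothetical kwargs:

kwargs = {'css_id': 'submit-btn', 'css_class': 'btn'}

field_id = kwargs.pop('css_id', '')   # consumes the key
print(field_id)                       # 'submit-btn'
print('css_id' in kwargs)             # False -- will not leak into attrs

kwargs = {'css_id': 'submit-btn'}
field_id = kwargs.get('css_id', '')   # the old form leaves it behind
print('css_id' in kwargs)             # True -- later picked up as an attribute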
diff --git a/abl/vpath/base/simpleuri.py b/abl/vpath/base/simpleuri.py
index <HASH>..<HASH> 100644
--- a/abl/vpath/base/simpleuri.py
+++ b/abl/vpath/base/simpleuri.py
@@ -91,7 +91,7 @@ class UriParse(object):
def _init_other_uri(self):
"init code for non http uri"
uri, querysep, rest = self.uri.partition('?')
- if querysep:
+ if querysep and '=' in rest:
self.uri = uri
self.query = parse_query_string(rest)
parts = self.uri.split('://', 1)
|
only parse the query string if it contains "="
|
py
|
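A short illustration of the guard: a '?' followed by something that is not a key=value pair no longer triggers query parsing. The helper and URIs below are made up for the demo:

def split_query(uri):
    base, sep, rest = uri.partition('?')
    if sep and '=' in rest:            # only treat the tail as a query string
        return base, rest
    return uri, None

print(split_query('ftp://host/path?a=1&b=2'))  # ('ftp://host/path', 'a=1&b=2')
print(split_query('svn://host/path?rev42'))    # ('svn://host/path?rev42', None)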
diff --git a/msvccompiler.py b/msvccompiler.py
index <HASH>..<HASH> 100644
--- a/msvccompiler.py
+++ b/msvccompiler.py
@@ -20,10 +20,7 @@ from distutils.ccompiler import \
_can_read_reg = 0
try:
- try:
- import _winreg
- except ImportError:
- import winreg # for pre-2000/06/29 CVS Python
+ import _winreg
_can_read_reg = 1
hkey_mod = _winreg
|
Simplify the registry-module-finding code: _winreg or win<I>api/win<I>con. This'll work fine with <I> or <I>, but is less than ideal for <I>a1/a2. But the code to accommodate <I>a1/a2 was released with Distutils <I>, so it can go away now.
|
py
|
diff --git a/datajoint/expression.py b/datajoint/expression.py
index <HASH>..<HASH> 100644
--- a/datajoint/expression.py
+++ b/datajoint/expression.py
@@ -313,7 +313,7 @@ class QueryExpression:
Each attribute name can only be used once.
"""
# new attributes in parentheses are included again with the new name without removing original
- duplication_pattern = re.compile(r'\s*\(\s*(?P<name>\w*[a-z]+\w*)\s*$\)\s*$')
+ duplication_pattern = re.compile(r'\s*\(\s*(?P<name>\w*[a-z]+\w*)\s*\)\s*$')
# attributes without parentheses renamed
rename_pattern = re.compile(r'\s*(?P<name>\w*[a-z]+\w*)\s*$')
replicate_map = {k: m.group('name')
|
Removed stray $ character from regex.
|
py
|
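The stray $ is a zero-width end-of-string anchor, so requiring \) after it made the duplication pattern unmatchable. A quick repro with a hypothetical attribute name:

import re

broken = re.compile(r'\s*\(\s*(?P<name>\w*[a-z]+\w*)\s*$\)\s*$')
fixed = re.compile(r'\s*\(\s*(?P<name>\w*[a-z]+\w*)\s*\)\s*$')

print(broken.match('(session_id)'))               # None -- can never match
print(fixed.match('(session_id)').group('name'))  # 'session_id'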
diff --git a/tests/integration/spm/test_repo.py b/tests/integration/spm/test_repo.py
index <HASH>..<HASH> 100644
--- a/tests/integration/spm/test_repo.py
+++ b/tests/integration/spm/test_repo.py
@@ -1,8 +1,6 @@
-# -*- coding: utf-8 -*-
"""
Tests for the spm repo
"""
-from __future__ import absolute_import, print_function, unicode_literals
import os
import shutil
|
Drop Py2 and six in tests/integration/spm/test_repo.py
|
py
|
diff --git a/cbamf/comp/ilms.py b/cbamf/comp/ilms.py
index <HASH>..<HASH> 100644
--- a/cbamf/comp/ilms.py
+++ b/cbamf/comp/ilms.py
@@ -27,8 +27,12 @@ class Polynomial3D(object):
return product(*(xrange(o) for o in self.order))
def _setup_rvecs(self):
+ # normalize all sizes to a strict upper bound on image size
+ # so we can transfer ILM between different images
+ MAX = 1024.0
+
o = self.shape
- self.rz, self.ry, self.rx = np.mgrid[0:o[0], 0:o[1], 0:o[2]] / float(max(o))
+ self.rz, self.ry, self.rx = np.mgrid[0:o[0], 0:o[1], 0:o[2]] / MAX
self._poly = []
for i,j,k in self._poly_orders():
|
fixing the max size of the illumination for comparison across images
|
py
|
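What the fixed bound buys: dividing by the image's own largest dimension tied the polynomial coordinates to image shape, while a constant MAX keeps them on one scale for every image. A tiny sketch with a made-up shape:

import numpy as np

MAX = 1024.0                          # strict upper bound on any image dimension
shape = (8, 16, 32)                   # hypothetical image shape
rz, ry, rx = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]] / MAX

# Coordinates now depend only on the fixed bound, not on the shape itself,
# so polynomial coefficients fit on one image transfer to another.
print(rx.max(), ry.max(), rz.max())   # 31/1024, 15/1024, 7/1024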
diff --git a/bika/lims/validators.py b/bika/lims/validators.py
index <HASH>..<HASH> 100644
--- a/bika/lims/validators.py
+++ b/bika/lims/validators.py
@@ -32,6 +32,7 @@ class UniqueFieldValidator:
for item in aq_parent(instance).objectValues():
if hasattr(item, 'UID') and item.UID() != instance.UID() and \
+ fieldname in item.Schema() and \
item.Schema()[fieldname].get(item) == value:
msg = _("Validation failed: '${value}' is not unique",
mapping={'value': safe_unicode(value)})
|
Do not assume all children of a content type are of the same type
|
py
|
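The guard is plain membership-before-access. A generic sketch with dict-backed stand-ins for the schemas (purely illustrative; real Archetypes schemas are not dicts):

# Siblings of a different type may simply lack the field being validated.
items = [{'title': 'Calcium'}, {'code': 'Ca'}]   # second sibling has no 'title'
fieldname, value = 'title', 'Calcium'

clashes = [item for item in items
           if fieldname in item and item[fieldname] == value]
print(len(clashes))   # 1 -- the sibling without the field no longer raises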
diff --git a/slack_cleaner/cli.py b/slack_cleaner/cli.py
index <HASH>..<HASH> 100644
--- a/slack_cleaner/cli.py
+++ b/slack_cleaner/cli.py
@@ -168,6 +168,9 @@ def remove_files(time_range, user_id=None, types=None):
latest = time_range.end_ts
page = 1
+ if user_id == -1:
+ user_id = None
+
has_more = True
while has_more:
res = slack.files.list(user=user_id, ts_from=oldest, ts_to=latest,
|
Add check for the wildcard user argument for files
|
py
|
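The sentinel translation in isolation: the CLI evidently uses -1 as a wildcard for files of all users, while the Slack API expects user=None to mean no filter. A hypothetical helper:

def normalize_user(user_id):
    # -1 is the CLI wildcard; the API wants None to mean "any user"
    return None if user_id == -1 else user_id

print(normalize_user(-1))        # None
print(normalize_user('U023BE'))  # 'U023BE' passes through unchanged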
diff --git a/setuptools/command/install_scripts.py b/setuptools/command/install_scripts.py
index <HASH>..<HASH> 100755
--- a/setuptools/command/install_scripts.py
+++ b/setuptools/command/install_scripts.py
@@ -31,13 +31,13 @@ class install_scripts(orig.install_scripts):
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
- cmd = ei.CommandSpec.from_param(exec_param)
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
writer = ei.ScriptWriter
if is_wininst:
- cmd = ei.CommandSpec.from_string("python.exe")
+ exec_param = "python.exe"
writer = ei.WindowsScriptWriter
+ cmd = ei.CommandSpec.from_param(exec_param)
for args in writer.best().get_args(dist, cmd.as_header()):
self.write_script(*args)
|
Defer resolution of the CommandSpec and do it exactly once.
|
py
|
diff --git a/gnsq/reader.py b/gnsq/reader.py
index <HASH>..<HASH> 100644
--- a/gnsq/reader.py
+++ b/gnsq/reader.py
@@ -85,7 +85,7 @@ class Reader(object):
multiple readers are running
:param lookupd_poll_jitter: the maximum fractional amount of jitter to add
- to the lookupd pool loop. This helps evenly distribute requests even if
+ to the lookupd poll loop. This helps evenly distribute requests even if
multiple consumers restart at the same time.
:param low_ready_idle_timeout: the amount of time in seconds to wait for a
|
fix typo "pool" in `Reader` doc: pool -> poll
|
py
|
diff --git a/iapws/iapws08.py b/iapws/iapws08.py
index <HASH>..<HASH> 100644
--- a/iapws/iapws08.py
+++ b/iapws/iapws08.py
@@ -662,7 +662,7 @@ def _Tension_SeaWater(T, S):
if S < 0 or S > 0.131:
raise NotImplementedError("Incoming out of bound")
else:
- raise NotImplementedError("Incoming out of bound")
+ raise NotImplementedError("Incoming out of bound")
sw = _Tension(T)
sigma = sw*(1+3.766e-1*S+2.347e-3*S*(T-273.15))
|
Correct flake8 E<I> complaint. ./iapws/iapws<I>.py:<I>:<I>: E<I> over-indented
|
py
|
diff --git a/bananas/admin.py b/bananas/admin.py
index <HASH>..<HASH> 100644
--- a/bananas/admin.py
+++ b/bananas/admin.py
@@ -260,7 +260,7 @@ class AdminView(View):
view = self.__class__.as_view(
action=view.__name__,
admin=self.admin,
- **initkwargs,
+ **initkwargs
)
return self.admin.admin_view(view, perm=perm)
|
Remove trailing comma after kwargs (SyntaxError for py<I>)
|
py
|
diff --git a/salt/states/file.py b/salt/states/file.py
index <HASH>..<HASH> 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
@@ -1529,7 +1529,7 @@ def recurse(name,
removed = _clean_dir(name, list(keep))
if removed:
ret['changes']['removed'] = removed
- ret['comment'] += 'Files cleaned from directory {0}'.format(name)
+ ret['comment'] = 'Files cleaned from directory {0}'.format(name)
return ret
|
Fix operand mismatch between dict and string in the file.recurse return comment when files are cleaned from a directory on recurse
|
py
|
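Why += could blow up: augmented assignment needs compatible operands, and if ret['comment'] holds something other than a string (a dict, per the commit message) the concatenation raises. A minimal repro with a made-up return dict:

ret = {'changes': {'removed': ['/srv/old']}, 'comment': {}}   # non-string comment

try:
    ret['comment'] += 'Files cleaned from directory /srv'
except TypeError as exc:
    print(exc)   # unsupported operand type(s) for +=: 'dict' and 'str'

ret['comment'] = 'Files cleaned from directory /srv'          # plain assignment is safe
print(ret['comment'])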
diff --git a/master/buildbot/test/unit/test_reporter_gitlab.py b/master/buildbot/test/unit/test_reporter_gitlab.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/test/unit/test_reporter_gitlab.py
+++ b/master/buildbot/test/unit/test_reporter_gitlab.py
@@ -132,8 +132,8 @@ class TestGitLabStatusPush(unittest.TestCase, ReporterTestMixin, logging.Logging
}, code=404)
build['complete'] = False
self.sp.buildStarted(("build", 20, "started"), build)
- self.assertLogged("Unknown \(or hidden\) gitlab projectbuildbot%2Fbuildbot:"
- " project not found")
+ self.assertLogged(r"Unknown \(or hidden\) gitlab projectbuildbot%2Fbuildbot:"
+ r" project not found")
@defer.inlineCallbacks
def test_nourl(self):
|
fix escape sequence for py3
|
py
|
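The underlying issue: \( is not a recognized string escape, so Python 3 warns on the plain literal (a DeprecationWarning, upgraded to SyntaxWarning in later releases), while a raw string passes the backslash through to the regex engine. A small check with an illustrative pattern:

import re

plain = 'Unknown \\(or hidden\\) project'   # explicit doubling also works
raw = r'Unknown \(or hidden\) project'      # the form the fix uses
assert plain == raw                          # same regex either way

print(re.search(raw, 'Unknown (or hidden) project') is not None)  # True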