Dataset Viewer

diff (string, lengths 139–3.65k) | message (string, lengths 8–627) | diff_languages (string, 1 class)
---|---|---|
diff --git a/magpie/config/__init__.py b/magpie/config/__init__.py
index <HASH>..<HASH> 100644
--- a/magpie/config/__init__.py
+++ b/magpie/config/__init__.py
@@ -1,9 +1,12 @@
from os import path
class ConfigPath(object):
+ def __init__(self):
+ self.config_paths = [path.join(path.expanduser('~'), '.magpie'), path.dirname(__file__)]
def __getattr__(self, key):
- return_path = path.join(path.dirname(__file__), key + '.cfg')
- if not path.exists(return_path): return None
- return return_path
+ for config_dir in self.config_paths:
+ return_path = path.join(config_dir, key + '.cfg')
+ if path.exists(return_path): return return_path
+ return None
config_path = ConfigPath()
|
Enable configuration from home directory This adds the possibility of defining multiple locations for the config-files. The given example first searches in ~/.magpie and if it doesn't find any config-files there, it searches in the default path. This enables configuration for each individual user and fixes the problem that magpie must be run as root if it was installed as root.
|
py
|
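The lookup in the row above is a first-match search over candidate directories: a user override in ~/.magpie wins over the packaged default. A minimal standalone sketch (the `find_config` name is hypothetical, not magpie's API):

```python
from os import path

def find_config(key, search_dirs):
    # return the first existing "<key>.cfg" among the candidate directories
    for config_dir in search_dirs:
        candidate = path.join(config_dir, key + '.cfg')
        if path.exists(candidate):
            return candidate
    return None

# config_path.database resolves roughly like this:
print(find_config('database', [path.join(path.expanduser('~'), '.magpie')]))
```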
diff --git a/scout/models/case/gene_list.py b/scout/models/case/gene_list.py
index <HASH>..<HASH> 100644
--- a/scout/models/case/gene_list.py
+++ b/scout/models/case/gene_list.py
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-
from mongoengine import (Document, ListField, StringField, FloatField,
DateTimeField, BooleanField, EmbeddedDocument,
- EmbeddedDocumentField, MapField, ReferenceField)
+ EmbeddedDocumentField, MapField, ReferenceField,
+ IntField)
from scout.models.hgnc_map import HgncGene
@@ -22,7 +23,7 @@ class Gene(EmbeddedDocument):
class GeneMeta(EmbeddedDocument):
- hgnc_id = StringField()
+ hgnc_id = IntField()
symbol = StringField()
def __unicode__(self):
|
store hgnc id as int
|
py
|
diff --git a/docs/conf.py b/docs/conf.py
index <HASH>..<HASH> 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -102,8 +102,6 @@ html_theme_options = {
"github_repo": "gidgethub",
"github_type": "star",
"github_banner": True,
- "travis_button": True,
- "codecov_button": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
|
Drop Travis and codecov buttons in the docs
|
py
|
diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
index <HASH>..<HASH> 100755
--- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
+++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
@@ -1298,13 +1298,13 @@ class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel):
```python
>>> import torch
- >>> from transformers import UniSpeechSatFeatureEncoder, UniSpeechSatForPreTraining
+ >>> from transformers import Wav2Vec2FeatureExtractor, UniSpeechSatForPreTraining
>>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices
>>> from datasets import load_dataset
>>> import soundfile as sf
- >>> feature_extractor = UniSpeechSatFeatureEncoder.from_pretrained("patrickvonplaten/unispeech_sat-base")
- >>> model = UniSpeechSatForPreTraining.from_pretrained("patrickvonplaten/unispeech_sat-base")
+ >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("microsoft/unispeech-sat-base")
+ >>> model = UniSpeechSatForPreTraining.from_pretrained("microsoft/unispeech-sat-base")
>>> def map_to_array(batch):
|
[Fix doc example] UniSpeechSatForPreTraining (#<I>) * fix doc example - cannot import name 'UniSpeechSatFeatureEncoder' * fix ckpt name
|
py
|
diff --git a/salt/grains/core.py b/salt/grains/core.py
index <HASH>..<HASH> 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -2036,7 +2036,7 @@ def _smartos_zone_data():
for mdata_grain in __salt__['cmd.run']('mdata-list').splitlines():
grain_data = __salt__['cmd.run']('mdata-get {0}'.format(mdata_grain))
- if mdata_grain == 'salt:roles': # parse salt:roles as roles grain
+ if mdata_grain == 'roles': # parse roles as roles grain
grain_data = grain_data.split(',')
grains['roles'] = grain_data
else: # parse other grains into mdata
|
use roles instead of salt:roles mdata entry
|
py
|
diff --git a/dpark/schedule.py b/dpark/schedule.py
index <HASH>..<HASH> 100644
--- a/dpark/schedule.py
+++ b/dpark/schedule.py
@@ -501,7 +501,7 @@ class MesosScheduler(DAGScheduler):
info.executor_id.value = sys.argv[0]
else:
dir = os.path.dirname(__file__)
- info.command.value = os.path.abspath(os.path.join(dir, 'executor.py'))
+ info.command.value = os.path.abspath(os.path.join(dir, 'executor%d%d.py' % sys.version_info[:2]))
info.executor_id.value = "default"
mem = info.resources.add()
@@ -681,7 +681,8 @@ class MesosScheduler(DAGScheduler):
# killed, lost, load failed
job.statusUpdate(task_id, tried, state, status.data)
- #self.slaveFailed[slave_id] = self.slaveFailed.get(slave_id,0) + 1
+ if state in (mesos_pb2.TASK_FAILED, mesos_pb2.TASK_LOST):
+ self.slaveFailed[slave_id] = self.slaveFailed.get(slave_id,0) + 1
def jobFinished(self, job):
logger.debug("job %s finished", job.id)
|
choose executor by python version block one slave when reaching MAX_FAILURES
|
py
|
diff --git a/pytodoist/todoist.py b/pytodoist/todoist.py
index <HASH>..<HASH> 100644
--- a/pytodoist/todoist.py
+++ b/pytodoist/todoist.py
@@ -468,7 +468,7 @@ class User(TodoistObject):
"""Return a list of tasks that match some search criteria.
.. note:: Example queries can be found
- `here https://todoist.com/Help/timeQuery`_.
+ `here <https://todoist.com/Help/timeQuery>`_.
:param queries: Return tasks that match at least one of these queries.
:type queries: list string
@@ -754,6 +754,9 @@ class Project(TodoistObject):
:return: The added task.
:rtype: :mod:`pytodoist.todoist.Task`
+ .. note:: See `here <https://todoist.com/Help/timeInsert>`_ for possible
+ date strings.
+
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('Homework')
|
Added link to example task date strings in docstring.
|
py
|
diff --git a/webdriver_test_tools/project/cmd/new.py b/webdriver_test_tools/project/cmd/new.py
index <HASH>..<HASH> 100644
--- a/webdriver_test_tools/project/cmd/new.py
+++ b/webdriver_test_tools/project/cmd/new.py
@@ -90,7 +90,9 @@ def main(test_package_path, test_package, args):
# Arguments for page-specific prompts
kwargs = {}
if validated_file_type == new_file.PAGE_TYPE:
- prototype = getattr(args, 'prototype', '' if minimum_required_args else None)
+ prototype = getattr(args, 'prototype', None)
+ if prototype is None and minimum_required_args:
+ prototype = ''
_prototype_choices = [name for name in new_file.PROTOTYPE_NAMES]
# Allow for numeric shorthand answers (starting at 1)
_prototype_shorthands = {
|
Fixed prototype prompt not getting set to '' if minimum_required_args
|
py
|
diff --git a/setuptools/command/editable_wheel.py b/setuptools/command/editable_wheel.py
index <HASH>..<HASH> 100644
--- a/setuptools/command/editable_wheel.py
+++ b/setuptools/command/editable_wheel.py
@@ -237,6 +237,8 @@ class editable_wheel(Command):
cmd = dist.get_command_obj(cmd_name)
if hasattr(cmd, "editable_mode"):
cmd.editable_mode = True
+ elif hasattr(cmd, "inplace"):
+ cmd.inplace = True # backward compatibility with distutils
def _collect_build_outputs(self) -> Tuple[List[str], Dict[str, str]]:
files: List[str] = []
|
Fix backward compatibility with distutils
|
py
|
diff --git a/djcelery/loaders.py b/djcelery/loaders.py
index <HASH>..<HASH> 100644
--- a/djcelery/loaders.py
+++ b/djcelery/loaders.py
@@ -2,6 +2,7 @@ import imp
import importlib
import warnings
+from celery import signals
from celery.loaders.base import BaseLoader
from celery.datastructures import DictAttribute
@@ -18,6 +19,15 @@ class DjangoLoader(BaseLoader):
"database": "djcelery.backends.database.DatabaseBackend",
"cache": "djcelery.backends.cache.CacheBackend"}
+ def __init__(self, *args, **kwargs):
+ super(DjangoLoader, self).__init__(*args, **kwargs)
+ self._install_signal_handlers()
+
+ def _install_signal_handlers(self):
+ # Need to close any open database connection after
+ # any embedded celerybeat process forks.
+ signals.beat_embedded_init.connect(self.close_database)
+
def read_configuration(self):
"""Load configuration from Django settings."""
from django.conf import settings
|
Need to close any open database connection after any embedded celerybeat process forks.
|
py
|
diff --git a/runtests.py b/runtests.py
index <HASH>..<HASH> 100755
--- a/runtests.py
+++ b/runtests.py
@@ -95,6 +95,9 @@ if os.name != 'nt':
# the user isn't a member of the directory's group. Set the group explicitly
# to avoid this.
os.chown(temp_dir, -1, os.getegid())
+ # Some environments have a weird umask that can leave state directories too
+ # open and break tests.
+ os.umask(0o022)
# Redirect all temporary files to that location
tempfile.tempdir = temp_dir
|
tests: set umask explicitly The umask is typically <I>, but in some environments it's not.
|
py
|
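The umask matters here because it silently masks the permission bits of every file a test creates. A small illustration (POSIX only) of the effect of 0o022:

```python
import os
import stat

os.umask(0o022)  # a file requested as 0o666 is created as 0o666 & ~0o022 == 0o644

with open('demo.txt', 'w'):
    pass
print(oct(stat.S_IMODE(os.stat('demo.txt').st_mode)))  # 0o644
os.remove('demo.txt')
```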
diff --git a/torchvision/models/densenet.py b/torchvision/models/densenet.py
index <HASH>..<HASH> 100644
--- a/torchvision/models/densenet.py
+++ b/torchvision/models/densenet.py
@@ -55,7 +55,7 @@ def densenet201(pretrained=False, **kwargs):
def densenet161(pretrained=False, **kwargs):
- r"""Densenet-201 model from
+ r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
|
Typo (#<I>) I have made some performance improvements to the model, and am testing them now. If results turn out good, I will submit a separate PR.
|
py
|
diff --git a/marshmallow_sqlalchemy/convert.py b/marshmallow_sqlalchemy/convert.py
index <HASH>..<HASH> 100644
--- a/marshmallow_sqlalchemy/convert.py
+++ b/marshmallow_sqlalchemy/convert.py
@@ -67,8 +67,7 @@ class ModelConverter(object):
mysql.LONGBLOB: fields.String,
mysql.SET: fields.List,
- mysql.ENUM: fields.Enum
-
+ mysql.ENUM: fields.Field,
}
DIRECTION_MAPPING = {
|
fields.Enum is deprecated. Change to fields.Field
|
py
|
diff --git a/salt/renderers/pyobjects.py b/salt/renderers/pyobjects.py
index <HASH>..<HASH> 100644
--- a/salt/renderers/pyobjects.py
+++ b/salt/renderers/pyobjects.py
@@ -263,7 +263,7 @@ from __future__ import absolute_import
import logging
import re
-from six import exec_
+from salt.utils.six import exec_
from salt.loader import _create_loader
from salt.fileclient import get_file_client
|
Replaced module six in file /salt/renderers/pyobjects.py
|
py
|
diff --git a/ores/ores.py b/ores/ores.py
index <HASH>..<HASH> 100644
--- a/ores/ores.py
+++ b/ores/ores.py
@@ -6,7 +6,7 @@ This script provides access to a set of utilities for ORES
* celery_worker -- Starts a "ScoreProcessor" celery worker
* precached -- Starts a daemon that requests scores for revisions as they happen
-%(usage)s
+{usage}
Options:
-h | --help Shows this documentation
<utility> The name of the utility to run
@@ -17,9 +17,9 @@ from importlib import import_module
USAGE = """Usage:
- %(progname)s (-h | --help)
- %(progname)s <utility> [-h | --help]
-""" % { "progname": sys.argv[0] }
+ {progname} (-h | --help)
+ {progname} <utility> [-h | --help]
+""".format(progname=sys.argv[0])
def main():
@@ -28,7 +28,7 @@ def main():
sys.stderr.write(USAGE)
sys.exit(1)
elif sys.argv[1] in ("-h", "--help"):
- sys.stderr.write(__doc__ % { "usage": USAGE })
+ sys.stderr.write(__doc__.format(usage=USAGE))
sys.exit(1)
elif sys.argv[1][:1] == "-":
sys.stderr.write(USAGE)
|
Changes string formatting to new style and removes weird whitespace.
|
py
|
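For reference, the two styles in the row above are equivalent for this usage string; only the placeholder syntax and escaping rules differ (percent signs must be doubled in the old style, braces in the new one):

```python
progname = "ores"

# old %-style with a mapping vs. new str.format with keyword arguments
usage_old = "Usage:\n    %(progname)s (-h | --help)\n" % {"progname": progname}
usage_new = "Usage:\n    {progname} (-h | --help)\n".format(progname=progname)

assert usage_old == usage_new
```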
diff --git a/dipper/sources/FlyBase.py b/dipper/sources/FlyBase.py
index <HASH>..<HASH> 100644
--- a/dipper/sources/FlyBase.py
+++ b/dipper/sources/FlyBase.py
@@ -1476,10 +1476,10 @@ class FlyBase(PostgreSQLSource):
# these are only omim diseases, not genes;
# we shouldn't be adding these here anyway
gu.addClassToGraph(g, did, dlabel)
- gu.addEquivalentClass(g, feature_id, did)
+ gu.addXref(g, feature_id, did)
else:
gu.addIndividualToGraph(g, did, dlabel)
- gu.addSameIndividual(g, feature_id, did)
+ gu.addXref(g, feature_id, did)
line_counter += 1
if not self.testMode \
|
change equivalent/sameAs to xref when processing dbxref table
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@ setup(
author='Calvin Lobo',
author_email='[email protected]',
url='https://github.com/lobocv/pyperform',
- download_url='https://github.com/lobocv/pyperform/tarball/1.5',
- keywords=['testing', 'performance', 'comparison', 'convenience', 'logging', 'timeit', 'speed'],
+ download_url='https://github.com/lobocv/pyperform/tarball/%s' % __version__,
+ keywords=['testing', 'performance', 'comparison', 'convenience', 'logging', 'timeit', 'speed', 'crash reporting'],
classifiers=[],
-)
\ No newline at end of file
+)
|
fix to setup.py download url
|
py
|
diff --git a/httpbin/core.py b/httpbin/core.py
index <HASH>..<HASH> 100644
--- a/httpbin/core.py
+++ b/httpbin/core.py
@@ -59,7 +59,7 @@ def set_cors_headers(response):
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, PATCH, OPTIONS'
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
- response.headers['Access-Control-Allow-Headers'] = request.headers.get('Access-Control-Request-Headers')
+ response.headers['Access-Control-Allow-Headers'] = request.headers['Access-Control-Request-Headers']
return response
|
got rid of unnecessary .get call
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,10 @@ setup(name = "MacFSEvents",
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: C',
+ 'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Filesystems',
],
|
Edited setup.py via GitHub
|
py
|
diff --git a/tests/test_models.py b/tests/test_models.py
index <HASH>..<HASH> 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -102,6 +102,17 @@ class PhotoModelIntegrationTestCase(TestCase):
photo = PhotoFactory(exif={'Make': 'Nikon'})
self.assertNotIn(photo, Photo.objects.for_exif('Make', 'Canon'))
+ def test_popular_tags(self):
+ PhotoFactory(tags=['cat'])
+ PhotoFactory(tags=['cat', 'fail'])
+ PhotoFactory(tags=['dog', 'cat', 'fail'])
+ tags = Photo.objects.popular_tags(count=3)
+ self.assertEqual(tags, [
+ {'count': 3, 'tag': 'cat'},
+ {'count': 2, 'tag': 'fail'},
+ {'count': 1, 'tag': 'dog'},
+ ])
+
def test_next_photo(self):
gallery = GalleryFactory()
photo1 = PhotoFactory(gallery=gallery)
|
Added tests for popular_tags method.
|
py
|
diff --git a/moto/events/responses.py b/moto/events/responses.py
index <HASH>..<HASH> 100644
--- a/moto/events/responses.py
+++ b/moto/events/responses.py
@@ -62,7 +62,9 @@ class EventsHandler(BaseResponse):
rule = self.events_backend.describe_rule(name)
if not rule:
- return self.error("ResourceNotFoundException", "Rule test does not exist.")
+ return self.error(
+ "ResourceNotFoundException", "Rule " + name + " does not exist."
+ )
rule_dict = self._generate_rule_dict(rule)
return json.dumps(rule_dict), self.response_headers
|
Fix a misleading error message AWSEvents.DescribeRule throws an error that references a rule named "test" rather than the specified rule name when a rule with the specified name does not exist. It has been fixed to reference the specified rule name.
|
py
|
diff --git a/pandas/io/tests/parser/common.py b/pandas/io/tests/parser/common.py
index <HASH>..<HASH> 100644
--- a/pandas/io/tests/parser/common.py
+++ b/pandas/io/tests/parser/common.py
@@ -149,11 +149,6 @@ c,3
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
- def test_multiple_skts_example(self):
- # TODO: Complete this
- data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11." # noqa
- pass
-
def test_malformed(self):
# see gh-6607
@@ -290,8 +285,11 @@ a,1,2
b,3,4
c,4,5
"""
- # TODO: complete this
- df = self.read_csv(StringIO(data)) # noqa
+ expected = DataFrame({'A': ['a', 'b', 'c'],
+ 'B': [1, 3, 4],
+ 'C': [2, 4, 5]})
+ out = self.read_csv(StringIO(data))
+ tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
|
CLN: Removed / filled stub read_csv tests (#<I>)
|
py
|
diff --git a/src/pikepdf/models/image.py b/src/pikepdf/models/image.py
index <HASH>..<HASH> 100644
--- a/src/pikepdf/models/image.py
+++ b/src/pikepdf/models/image.py
@@ -93,7 +93,7 @@ class PdfImageBase(ABC):
@abstractmethod
def _metadata(self, name, type_, default):
- raise NotImplementedError()
+ ...
@property
def width(self):
@@ -158,12 +158,12 @@ class PdfImageBase(ABC):
@property
@abstractmethod
def is_inline(self):
- raise NotImplementedError()
+ ...
@property
@abstractmethod
def icc(self):
- raise NotImplementedError()
+ ...
@property
def indexed(self):
@@ -270,7 +270,7 @@ class PdfImageBase(ABC):
@abstractmethod
def as_pil_image(self):
- raise NotImplementedError()
+ ...
@staticmethod
def _unstack_compression(buffer, filters):
|
image: use ellipsis for unimplemented abstractmethods
|
py
|
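The `...` body is sufficient because `@abstractmethod` itself prevents instantiation of subclasses that fail to override; the explicit `raise NotImplementedError()` was redundant. A self-contained illustration:

```python
from abc import ABC, abstractmethod

class Base(ABC):
    @abstractmethod
    def as_pil_image(self):
        ...  # the ABC machinery enforces overriding; no raise needed

class Incomplete(Base):
    pass

try:
    Incomplete()
except TypeError as exc:
    print(exc)  # "Can't instantiate abstract class Incomplete ..."
```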
diff --git a/compiler/js/generator.py b/compiler/js/generator.py
index <HASH>..<HASH> 100644
--- a/compiler/js/generator.py
+++ b/compiler/js/generator.py
@@ -100,15 +100,18 @@ class generator(object):
used_re = re.compile(r'@using\s*{(.*?)}')
+ def scan_using(self, code):
+ for m in generator.used_re.finditer(code):
+ name = m.group(1).strip()
+ package, component_name = split_name(name)
+ package = escape_package(package)
+ self.used_components.add(name)
+ self.used_packages.add(package)
+
def generate_components(self):
#finding explicit @using declarations in code
for name, code in self.imports.iteritems():
- for m in generator.used_re.finditer(code):
- name = m.group(1).strip()
- package, component_name = split_name(name)
- package = escape_package(package)
- self.used_components.add(name)
- self.used_packages.add(package)
+ self.scan_using(code)
generated = set([root_type])
queue = ['core.Context']
@@ -214,6 +217,7 @@ class generator(object):
return self.replace_args(text)
def replace_args(self, text):
+ self.scan_using(text)
#COPY_ARGS optimization
def copy_args(m):
def expr(var, op, idx):
|
added generator.scan_using(), process .core.js files
|
py
|
diff --git a/spacy/glossary.py b/spacy/glossary.py
index <HASH>..<HASH> 100644
--- a/spacy/glossary.py
+++ b/spacy/glossary.py
@@ -95,6 +95,7 @@ GLOSSARY = {
"XX": "unknown",
"BES": 'auxiliary "be"',
"HVS": 'forms of "have"',
+ "_SP": "whitespace",
# POS Tags (German)
# TIGER Treebank
# http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/annotation/tiger_introduction.pdf
|
Add glossary entry for _SP (#<I>)
|
py
|
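Assuming a spaCy version that ships this entry, the glossary is what `spacy.explain` consults:

```python
import spacy

print(spacy.explain("_SP"))  # 'whitespace', with the entry above present
print(spacy.explain("BES"))  # 'auxiliary "be"'
```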
diff --git a/tests/PublicKey.py b/tests/PublicKey.py
index <HASH>..<HASH> 100644
--- a/tests/PublicKey.py
+++ b/tests/PublicKey.py
@@ -40,3 +40,7 @@ class PublicKey(Framework.TestCase):
self.public_key.key, "u5e1Z25+z8pmgVVt5Pd8k0z/sKpVL1MXYtRAecE4vm8="
)
self.assertEqual(self.public_key.key_id, "568250167242549743")
+ self.assertEqual(
+ repr(self.public_key),
+ 'PublicKey(key_id="568250167242549743", key="u5e1Z25+z8pmgVVt5Pd8k0z/sKpVL1MXYtRAecE4vm8=")',
+ )
|
Test repr() for PublicKey (#<I>) The new PublicKey tests did not test the repr() method; increase coverage by doing so.
|
py
|
diff --git a/setuptools_scm/version.py b/setuptools_scm/version.py
index <HASH>..<HASH> 100644
--- a/setuptools_scm/version.py
+++ b/setuptools_scm/version.py
@@ -22,8 +22,8 @@ def _warn_if_setuptools_outdated():
def callable_or_entrypoint(group, callable_or_name):
trace('ep', (group, callable_or_name))
if isinstance(callable_or_name, str):
- ep = next(iter_entry_points(group, callable_or_name))
- return ep.load()
+ for ep in iter_entry_points(group, callable_or_name):
+ return ep.load()
else:
return callable_or_name
|
simplify callable_or_entrypoint(group, callable_or_name)
|
py
|
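Besides reading more simply, the loop also changes the empty case: `next()` on an exhausted iterator raises `StopIteration`, while a `for` loop that never runs falls through and the function returns `None`. Illustrated in isolation:

```python
def first_or_none(iterable):
    for item in iterable:
        return item
    return None  # implicit fall-through in the patched function

assert first_or_none([]) is None

try:
    next(iter([]))
except StopIteration:
    print("next() raises when no entry point matches")
```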
diff --git a/visidata/graph.py b/visidata/graph.py
index <HASH>..<HASH> 100644
--- a/visidata/graph.py
+++ b/visidata/graph.py
@@ -92,8 +92,9 @@ class GraphSheet(InvertedYGridCanvas):
k = tuple(c.getValue(row) for c in catcols) if catcols else (ycol.name,)
attr = self.plotColor(k)
- graph_x = float(numcol.getTypedValue(row)) if self.xcols else rownum
- graph_y = ycol.getTypedValue(row)
+ # convert deliberately to float (to e.g. linearize date)
+ graph_x = float(numcol.type(numcol.getValue(row))) if self.xcols else rownum
+ graph_y = ycol.type(ycol.getValue(row))
self.point(graph_x, graph_y, attr, row)
nplotted += 1
|
[graph] do not plot exceptions
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -13,6 +13,6 @@ setup(
'uncurl = uncurl.bin:main',
],
},
- install_requires=['xerox'],
+ install_requires=['xerox', 'six'],
packages=find_packages(exclude=("tests", "tests.*")),
)
|
added six as dep in setup.py
|
py
|
diff --git a/salt/returners/mongo_future_return.py b/salt/returners/mongo_future_return.py
index <HASH>..<HASH> 100644
--- a/salt/returners/mongo_future_return.py
+++ b/salt/returners/mongo_future_return.py
@@ -126,3 +126,36 @@ def get_fun(fun):
if rdata:
ret[collection] = rdata
return ret
+
+
+def get_minions():
+ '''
+ Return a list of minions
+ '''
+ conn, db = _get_conn()
+ ret = []
+ for name in db.collection_names():
+ if len(name) == 20:
+ try:
+ int(name)
+ continue
+ except ValueError:
+ pass
+ ret.append(name)
+ return ret
+
+
+def get_jids():
+ '''
+ Return a list of job ids
+ '''
+ conn, db = _get_conn()
+ ret = []
+ for name in db.collection_names():
+ if len(name) == 20:
+ try:
+ int(name)
+ ret.append(name)
+ except ValueError:
+ pass
+ return ret
|
Add get_minions and get_jids to mongo returner
|
py
|
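Both helpers hinge on the same test: Salt job ids are 20-digit timestamp strings, so a collection name that is 20 characters long and parses as an integer is a jid, and anything else is a minion id. The predicate in isolation (hypothetical helper name):

```python
def looks_like_jid(name):
    # Salt jids are 20-digit timestamps, e.g. '20140101120000123456'
    if len(name) != 20:
        return False
    try:
        int(name)
    except ValueError:
        return False
    return True

assert looks_like_jid('20140101120000123456')   # a job id
assert not looks_like_jid('web01.example.com')  # a minion id
```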
diff --git a/pygenstub.py b/pygenstub.py
index <HASH>..<HASH> 100644
--- a/pygenstub.py
+++ b/pygenstub.py
@@ -343,12 +343,7 @@ class ClassNode(StubNode):
:return: Lines of stub code for this class.
"""
stub = []
-
- if len(self.bases) > 0:
- bases = "(" + ", ".join(self.bases) + ")"
- else:
- bases = ""
-
+ bases = ("(" + ", ".join(self.bases) + ")") if len(self.bases) > 0 else ""
slots = {"n": self.name, "b": bases}
if len(self.children) == 0:
stub.append("class %(n)s%(b)s: ..." % slots)
|
fix: Minor improvement on base class code generation
|
py
|
diff --git a/ga4gh/client/cli.py b/ga4gh/client/cli.py
index <HASH>..<HASH> 100644
--- a/ga4gh/client/cli.py
+++ b/ga4gh/client/cli.py
@@ -842,6 +842,7 @@ class SearchExpressionLevelsRunner(AbstractSearchRunner):
expression.score, expression.units, sep="\t", end="\t")
print()
+
class ListPeersRunner(FormattedOutputRunner):
"""
Runner class for the references/{id}/bases method
@@ -858,6 +859,7 @@ class ListPeersRunner(FormattedOutputRunner):
for peer in peers:
print(peer.url)
+
class AnnouncePeerRunner(FormattedOutputRunner):
def __init__(self, args):
super(AnnouncePeerRunner, self).__init__(args)
|
Update cli.py flake failing
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@ setup(
url='http://github.com/vpelletier/python-functionfs',
license='GPLv3+',
platforms=['linux'],
- packages=['functionfs'],
+ packages=['functionfs', 'functionfs.tests'],
install_requires=[
'ioctl-opt',
],
|
setup: Also distribute functionfs.tests in the egg. As was the original intention.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,7 @@ except ImportError:
description = 'Calculations for the position of the sun and moon.'
try:
from common.setup_funcs import read_contents
- long_description = read_contents(os.path.dirname(__file__), 'README')
+ long_description = read_contents(os.path.dirname(__file__), 'README.md')
except ImportError:
long_description = description
|
Read Me file now has a .md extension
|
py
|
diff --git a/lib/access_control_config.py b/lib/access_control_config.py
index <HASH>..<HASH> 100644
--- a/lib/access_control_config.py
+++ b/lib/access_control_config.py
@@ -241,6 +241,7 @@ CFG_ACC_ACTIVITIES_URLS = {
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
+ 'cfgwebjournal' : (_("Configure WebJournal"), "%s/admin/webjournal/webjournaladmin.py?ln=%%s" % CFG_SITE_URL),
'runbibcirculation' : (_("Run BibCirculation"), "%s/admin/bibcirculation/bibcirculationadmin.py?ln=%%s" % CFG_SITE_URL)
}
|
WebJournal: display in admin menu * Display the link to the WebJournal administration pages in the administration activities menu.
|
py
|
diff --git a/claripy/operations.py b/claripy/operations.py
index <HASH>..<HASH> 100644
--- a/claripy/operations.py
+++ b/claripy/operations.py
@@ -179,11 +179,6 @@ def str_concat_length_calc(*args):
def str_replace_length_calc(*args):
str_1, str_2, str_3 = args
-<<<<<<< 2a35379e3742622796f30a0c8776d0cdd1ceb0c0
- # TODO: figure out how to deal with this
- # we need to know if the string has been replaced correctly
- # or not first in order to calculate the length...
-=======
# Return the maximum length that the string can assume after the replace
# operation
#
@@ -193,7 +188,6 @@ def str_replace_length_calc(*args):
if str_2.string_length >= str_3.string_length:
return str_1.string_length
# Otherwise We have the maximum length when teh replacement happens
->>>>>>> Fixed StrReplace length
return str_1.string_length - str_2.string_length + str_3.string_length
def strlen_bv_size_calc(s, bitlength):
|
Add if simplification in presence of extract
|
py
|
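The bound kept by this hunk is easy to check by hand: if the replacement is no longer than the pattern, a replace can only shrink the string, so the original length is the maximum; otherwise a single replacement grows it by the length difference. A worked check:

```python
def str_replace_max_length(len_s, len_old, len_new):
    if len_old >= len_new:
        return len_s                      # replacing cannot grow the string
    return len_s - len_old + len_new      # one replacement grows it by the difference

# "hello" (5 chars), replacing the first "l" (1 char) with "LL" (2 chars) -> "heLLlo" (6 chars)
assert str_replace_max_length(5, 1, 2) == len("hello".replace("l", "LL", 1)) == 6
```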
diff --git a/spyderlib/widgets/projectexplorer.py b/spyderlib/widgets/projectexplorer.py
index <HASH>..<HASH> 100644
--- a/spyderlib/widgets/projectexplorer.py
+++ b/spyderlib/widgets/projectexplorer.py
@@ -674,8 +674,10 @@ class ExplorerTreeWidget(OneColumnTree):
project.load()
self.projects.append(project)
self.__update_title()
+ self.save_expanded_state()
project.populate_tree(self, self.include, self.exclude, self.show_all)
project.refresh(self, self.include, self.exclude, self.show_all)
+ self.restore_expanded_state()
self.__sort_toplevel_items()
self.parent_widget.emit(SIGNAL("pythonpath_changed()"))
return project
|
Project Explorer: tree widget expanded state is saved/restored before/after adding a new project
|
py
|
diff --git a/openpnm/utils/Workspace.py b/openpnm/utils/Workspace.py
index <HASH>..<HASH> 100644
--- a/openpnm/utils/Workspace.py
+++ b/openpnm/utils/Workspace.py
@@ -211,7 +211,7 @@ class Workspace(dict):
"""
from openpnm.io import OpenpnmIO
- OpenpnmIO.save_project(filename=filename)
+ OpenpnmIO.load_project(filename=filename)
def close_project(self, project):
r"""
|
fixing mistaken call to save project in load project wrapper
|
py
|
diff --git a/yowsup/layers/axolotl/layer_receive.py b/yowsup/layers/axolotl/layer_receive.py
index <HASH>..<HASH> 100644
--- a/yowsup/layers/axolotl/layer_receive.py
+++ b/yowsup/layers/axolotl/layer_receive.py
@@ -94,7 +94,8 @@ class AxolotlReceivelayer(AxolotlBaseLayer):
self.getKeysFor([senderJid], successFn)
except exceptions.DuplicateMessageException:
- logger.warning("Received a message that we've previously decrypted, goint to send the delivery receipt myself")
+ logger.warning("Received a message that we've previously decrypted, "
+ "going to send the delivery receipt myself")
self.toLower(OutgoingReceiptProtocolEntity(node["id"], node["from"], participant=node["participant"]).toProtocolTreeNode())
except UntrustedIdentityException as e:
|
[log] fix typo Closes #<I> Spelling mistake
|
py
|
diff --git a/peewee.py b/peewee.py
index <HASH>..<HASH> 100644
--- a/peewee.py
+++ b/peewee.py
@@ -18,6 +18,13 @@ class Database(object):
def get_connection(self):
return sqlite3.connect(self.database)
+ def execute(self, sql, commit=False):
+ cursor = self.conn.cursor()
+ res = cursor.execute(sql)
+ if commit:
+ self.conn.commit()
+ return res
+
def last_insert_id(self):
result = self.execute("SELECT last_insert_rowid();")
return result.fetchone()[0]
@@ -202,11 +209,9 @@ class BaseQuery(object):
return computed_joins, where_with_alias, alias_map
def raw_execute(self):
- cursor = self.database.conn.cursor()
- result = cursor.execute(self.sql())
- if self.requires_commit:
- self.database.conn.commit()
- logger.debug(self.sql())
+ query = self.sql()
+ result = self.database.execute(query, self.requires_commit)
+ logger.debug(query)
return result
|
Moving the sql bits around a little more
|
py
|
diff --git a/nfc/dev/transport.py b/nfc/dev/transport.py
index <HASH>..<HASH> 100644
--- a/nfc/dev/transport.py
+++ b/nfc/dev/transport.py
@@ -227,7 +227,8 @@ class USB(object):
self.usb_dev.claimInterface(0)
except self.usb.USBError:
raise IOError("unusable device")
- self.usb_dev.reset()
+ if (dev.idVendor, dev.idProduct) in [(0x54c, 0x193), (0x4cc, 0x531)]:
+ self.usb_dev.reset() # needed for PN531 only
interface = dev.configurations[0].interfaces[0]
endpoints = interface[0].endpoints
bulk_inp = lambda ep: (\
|
fix: usb device reset when using pyusb 0.x is only needed for the PN<I> chip (makes trouble with PN<I>)
|
py
|
diff --git a/glances/stats_client.py b/glances/stats_client.py
index <HASH>..<HASH> 100644
--- a/glances/stats_client.py
+++ b/glances/stats_client.py
@@ -45,13 +45,18 @@ class GlancesStatsClient(GlancesStats):
header = "glances_"
for item in input_plugins:
# Import the plugin
- plugin = __import__(header + item)
- # Add the plugin to the dictionary
- # The key is the plugin name
- # for example, the file glances_xxx.py
- # generate self._plugins_list["xxx"] = ...
- logger.debug("Server uses {} plugin".format(item))
- self._plugins[item] = plugin.Plugin(args=self.args)
+ try:
+ plugin = __import__(header + item)
+ except ImportError:
+ # Server plugin can not be imported from the client side
+ logger.error("Can not import {} plugin. Please upgrade your Glances client/server version.".format(item))
+ else:
+ # Add the plugin to the dictionary
+ # The key is the plugin name
+ # for example, the file glances_xxx.py
+ # generate self._plugins_list["xxx"] = ...
+ logger.debug("Server uses {} plugin".format(item))
+ self._plugins[item] = plugin.Plugin(args=self.args)
# Restoring system path
sys.path = sys_path
|
Improve error message as asked in issue #<I>
|
py
|
diff --git a/pilight/test/pilight_daemon.py b/pilight/test/pilight_daemon.py
index <HASH>..<HASH> 100644
--- a/pilight/test/pilight_daemon.py
+++ b/pilight/test/pilight_daemon.py
@@ -5,7 +5,7 @@
commands are supported.
This is a very hackish synchronous daemon that is
- not an example for good sockets servers!
+ not an example for good socket servers!
"""
import datetime
@@ -66,7 +66,7 @@ class PilightDeamonSim(threading.Thread):
# Setup thread
threading.Thread.__init__(self)
self.daemon = True
- self._stop = threading.Event()
+ self._stop_thread = threading.Event()
self._lock = threading.Lock()
self._data = queue.Queue()
@@ -101,7 +101,7 @@ class PilightDeamonSim(threading.Thread):
def run(self):
"""Simple infinite loop handling socket connections."""
with self._lock:
- while not self._stop.wait(0.01):
+ while not self._stop_thread.wait(0.01):
self._handle_client_connections()
self._handle_client_data()
self._send_codes()
@@ -180,7 +180,7 @@ class PilightDeamonSim(threading.Thread):
def stop(self):
"""Called to stop the reveiver thread."""
- self._stop.set()
+ self._stop_thread.set()
with self._lock: # Receive thread might use the socket
try:
self.server_socket.shutdown(socket.SHUT_RDWR)
|
BUG: do not use builtins
|
py
|
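The renamed attribute avoids a real collision: in CPython, `threading.Thread` defines a private `_stop` method used during shutdown bookkeeping, so assigning an `Event` to `self._stop` shadows it. A minimal sketch of the safe pattern:

```python
import threading

print(callable(threading.Thread._stop))  # True in CPython: _stop is a Thread method

class Worker(threading.Thread):
    def __init__(self):
        super().__init__()
        self._stop_thread = threading.Event()  # safe name, no clash with Thread internals

    def run(self):
        while not self._stop_thread.wait(0.01):
            pass  # handle work here

worker = Worker()
worker.start()
worker._stop_thread.set()
worker.join()
```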
diff --git a/craftai/time.py b/craftai/time.py
index <HASH>..<HASH> 100644
--- a/craftai/time.py
+++ b/craftai/time.py
@@ -49,7 +49,7 @@ class Time(object):
offset = int(tz[-4:-2]) * 60 + int(tz[-2:])
if tz[0] == "-":
offset = -offset
- time = time.astimezone(tzinfo=timezone(offset))
+ time = time.astimezone(tz=timezone(timedelta(minutes=offset)))
else:
raise CraftAITimeError(
"""Unable to instantiate Time with the given timezone."""
|
Fixed bad keyword argument use with astimezone * offset argument in timezone instantiation needs to be a timedelta -> fixed * keyword argument is not 'tzinfo' but just 'tz'
|
py
|
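The surrounding code computes `offset` in minutes (hours times 60 plus minutes), so the timedelta must be built from minutes. A self-contained sketch of the same parsing (hypothetical helper name):

```python
from datetime import datetime, timedelta, timezone

def parse_utc_offset(tz):
    # '+0530' -> +330 minutes, '-0200' -> -120 minutes
    offset = int(tz[-4:-2]) * 60 + int(tz[-2:])
    if tz[0] == "-":
        offset = -offset
    return timezone(timedelta(minutes=offset))

local = datetime.now(timezone.utc).astimezone(tz=parse_utc_offset("+0530"))
print(local.utcoffset())  # 5:30:00
```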
diff --git a/allauth/socialaccount/providers/oauth2/client.py b/allauth/socialaccount/providers/oauth2/client.py
index <HASH>..<HASH> 100644
--- a/allauth/socialaccount/providers/oauth2/client.py
+++ b/allauth/socialaccount/providers/oauth2/client.py
@@ -28,7 +28,7 @@ class OAuth2Client(object):
self.consumer_secret = consumer_secret
self.scope = scope_delimiter.join(set(scope))
self.state = None
- self.headers = None
+ self.headers = headers
self.basic_auth = basic_auth
def get_redirect_url(self, authorization_url, extra_params):
|
Fix: Header not propagated from adapter to client (#<I>) Bug: The header is not propagated from adapter to client. OAuth2 result: reddit response <I> too many requests. <URL>
|
py
|
diff --git a/spyder/plugins/editor/widgets/codeeditor.py b/spyder/plugins/editor/widgets/codeeditor.py
index <HASH>..<HASH> 100644
--- a/spyder/plugins/editor/widgets/codeeditor.py
+++ b/spyder/plugins/editor/widgets/codeeditor.py
@@ -2067,13 +2067,12 @@ class CodeEditor(TextEditBaseWidget):
self.contentOffset()).top()
bottom = top + self.blockBoundingRect(block).height()
folding_panel = self.panels.get(FoldingPanel)
-
while block.isValid() and top < event.pos().y():
block = block.next()
if block.isVisible(): # skip collapsed blocks
top = bottom
bottom = top + self.blockBoundingRect(block).height()
- line_number += 1
+ line_number += 1
return line_number
def select_lines(self, linenumber_pressed, linenumber_released):
|
Count line numbers even if they are collapsed
|
py
|
diff --git a/ca/django_ca/crl.py b/ca/django_ca/crl.py
index <HASH>..<HASH> 100644
--- a/ca/django_ca/crl.py
+++ b/ca/django_ca/crl.py
@@ -68,5 +68,10 @@ def get_crl(ca, encoding, expires, algorithm, password, ca_crl=False):
for cert in qs.revoked():
builder = builder.add_revoked_certificate(cert.get_revocation())
+ # TODO: Add CRLNumber extension
+ # https://cryptography.io/en/latest/x509/reference/#cryptography.x509.CRLNumber
+ # TODO: Add IssuingDistributionPoint extension
+ # https://cryptography.io/en/latest/x509/reference/#cryptography.x509.IssuingDistributionPoint
+
crl = builder.sign(private_key=ca.key(password), algorithm=algorithm, backend=default_backend())
return crl.public_bytes(encoding)
|
add some todos for CRLs
|
py
|
diff --git a/salt/modules/mine.py b/salt/modules/mine.py
index <HASH>..<HASH> 100644
--- a/salt/modules/mine.py
+++ b/salt/modules/mine.py
@@ -2,11 +2,11 @@
'''
The function cache system allows for data to be stored on the master so it can be easily read by other minions
'''
-from __future__ import absolute_import
# Import python libs
import copy
import logging
+from __future__ import absolute_import
# Import salt libs
import salt.crypt
@@ -313,12 +313,12 @@ def get_docker(interfaces=None, cidrs=None):
proxy_lists = {}
# Process docker info
- for host, containers in list(docker_hosts.items()):
+ for host, containers in docker_hosts.items():
host_ips = []
# Prepare host_ips list
if not interfaces:
- for iface, info in list(containers['host']['interfaces'].items()):
+ for iface, info in containers['host']['interfaces'].items():
if 'inet' in info:
for ip_ in info['inet']:
host_ips.append(ip_['address'])
|
List call not needed. Changing it back to what it was
|
py
|
diff --git a/stacker/plan.py b/stacker/plan.py
index <HASH>..<HASH> 100644
--- a/stacker/plan.py
+++ b/stacker/plan.py
@@ -44,8 +44,9 @@ class Step(object):
self.set_status(SUBMITTED)
def run(self, results):
- self.submit()
- return self._run_func(results, self.stack)
+ if not self.submitted:
+ self.submit()
+ return self._run_func(results, self.stack, status=self.status)
def set_status(self, status):
logger.debug("Setting %s state to %s.", self.stack.name, status.name)
|
Only submit the step if it hasn't been submitted yet
|
py
|
diff --git a/pyzotero/zotero.py b/pyzotero/zotero.py
index <HASH>..<HASH> 100644
--- a/pyzotero/zotero.py
+++ b/pyzotero/zotero.py
@@ -539,7 +539,7 @@ class Zotero(object):
"""
items = []
items.extend(query)
- while not self.links['self'] == self.links['last']:
+ while self.links.get('next'):
items.extend(self.follow())
return items
|
Don't use 'self' link to drive app state API v3 removed the 'self' link, so it's no longer possible to compare 'last' and 'self' to check whether more data can be retrieved. However, checking to see if 'next' exists should work just as well. Closes #<I>
|
py
|
diff --git a/scripts/VirtuaBuild/smoke_install_test.py b/scripts/VirtuaBuild/smoke_install_test.py
index <HASH>..<HASH> 100644
--- a/scripts/VirtuaBuild/smoke_install_test.py
+++ b/scripts/VirtuaBuild/smoke_install_test.py
@@ -1,4 +1,4 @@
-import time
+import time, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'test', 'common')))
import workload_common
|
Forgot to import sys in smoke_install_test.py
|
py
|
diff --git a/gevent_zeromq/__init__.py b/gevent_zeromq/__init__.py
index <HASH>..<HASH> 100644
--- a/gevent_zeromq/__init__.py
+++ b/gevent_zeromq/__init__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
"""gevent_zmq - gevent compatibility with zeromq.
Usage
|
Added an encoding line since we now have a "Ø".
|
py
|
diff --git a/nhlib/source/base.py b/nhlib/source/base.py
index <HASH>..<HASH> 100644
--- a/nhlib/source/base.py
+++ b/nhlib/source/base.py
@@ -41,10 +41,6 @@ class SeismicSource(object):
the trade-off between time needed to compute the :meth:`distance
<nhlib.geo.surface.base.BaseSurface.get_min_distance>` between
the rupture surface and a site and the precision of that computation.
- :param upper_seismogenic_depth:
- Minimum depth an earthquake rupture can reach, in km.
- :param lower_seismogenic_depth:
- Maximum depth an earthquake rupture can reach, in km.
:param magnitude_scaling_relationship:
Instance of subclass of :class:`nhlib.msr.base.BaseMSR` to describe
how does the area of the rupture depend on magnitude and rake.
|
source/base [docs]: removed erroneous info about upper and lower seism.d
|
py
|
diff --git a/pythran/tests/test_itertools.py b/pythran/tests/test_itertools.py
index <HASH>..<HASH> 100644
--- a/pythran/tests/test_itertools.py
+++ b/pythran/tests/test_itertools.py
@@ -114,6 +114,9 @@ def ifiltern_(l0):
def test_count2(self):
self.run_test("def count2(n): from itertools import count ; c = count(n,3.2) ; next(c); next(c); return next(c)", 100, count2=[int])
+ def test_count3(self):
+ self.run_test("def count3(n):\n from itertools import count\n j = 1\n for i in count(n):\n if i == 10: return j\n else: j +=1", 1, count3=[int])
+
def test_next_enumerate(self):
self.run_test("def next_enumerate(n): x = enumerate(n) ; next(x) ; return map(None, x)", range(5), next_enumerate=[[int]])
|
Better itertools.count test coverage
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
setup(
name='salic-ml',
- version='0.0.3',
+ version='0.0.4',
description='Automate the Salic proposal admission process',
url='https://github.com/lappis-unb/salic-ml',
license='GPL v3.0',
|
Update pip package version to <I>
|
py
|
diff --git a/lib/autokey/model/abstract_hotkey.py b/lib/autokey/model/abstract_hotkey.py
index <HASH>..<HASH> 100644
--- a/lib/autokey/model/abstract_hotkey.py
+++ b/lib/autokey/model/abstract_hotkey.py
@@ -45,7 +45,7 @@ class AbstractHotkey(AbstractWindowFilter):
modifiers.sort()
self.modifiers = modifiers
self.hotKey = key
- if key is not None:
+ if key is not None and TriggerMode.HOTKEY not in self.modes:
self.modes.append(TriggerMode.HOTKEY)
def unset_hotkey(self):
|
Fix for TriggerMode.HOTKEY (3) showing up in modes in the save file twice
|
py
|
diff --git a/talon/quotations.py b/talon/quotations.py
index <HASH>..<HASH> 100644
--- a/talon/quotations.py
+++ b/talon/quotations.py
@@ -280,10 +280,15 @@ def preprocess(msg_body, delimiter, content_type='text/plain'):
Replaces link brackets so that they couldn't be taken for quotation marker.
Splits line in two if splitter pattern preceded by some text on the same
line (done only for 'On <date> <person> wrote:' pattern).
+
+ Converts msg_body into a unicode.
"""
# normalize links i.e. replace '<', '>' wrapping the link with some symbols
# so that '>' closing the link couldn't be mistakenly taken for quotation
# marker.
+ if isinstance(msg_body, bytes):
+ msg_body = msg_body.decode('utf8')
+
def link_wrapper(link):
newline_index = msg_body[:link.start()].rfind("\n")
if msg_body[newline_index + 1] == ">":
|
Convert msg_body into unicode in preprocess.
|
py
|
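The added guard is the usual bytes-versus-text normalization at an API boundary; everything downstream can then assume `str`. In isolation:

```python
def ensure_text(msg_body):
    if isinstance(msg_body, bytes):
        msg_body = msg_body.decode('utf8')
    return msg_body

assert ensure_text(b'On Mon, Bob wrote:') == 'On Mon, Bob wrote:'
assert ensure_text('already text') == 'already text'
```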
diff --git a/nanoplotter/timeplots.py b/nanoplotter/timeplots.py
index <HASH>..<HASH> 100644
--- a/nanoplotter/timeplots.py
+++ b/nanoplotter/timeplots.py
@@ -36,7 +36,7 @@ def time_plots(df, subsampled_df, path, title=None, color="#4CB391",
log_length=False, plot_settings=None):
"""Making plots of time vs read length, time vs quality and cumulative yield."""
- logging.info("Nanoplotter: Creating timeplots using {} reads.".format(len(dfs)))
+ logging.info("Nanoplotter: Creating timeplots using {} (complete dataset)/{} (subsampled dataset) reads.".format(len(df), len(subsampled_df)))
cumyields = cumulative_yield(dfs=df.set_index("start_time"),
path=path,
title=title,
|
refactored variable name of dataframe to match
|
py
|
diff --git a/dj_static.py b/dj_static.py
index <HASH>..<HASH> 100644
--- a/dj_static.py
+++ b/dj_static.py
@@ -4,7 +4,10 @@ import static
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
-from django.core.handlers.base import get_path_info
+try:
+ from django.core.handlers.base import get_path_info
+except ImportError:
+ from django.core.handlers.wsgi import get_path_info
from django.contrib.staticfiles.handlers import StaticFilesHandler as DebugHandler
try:
|
Make compatible with Django Dev (<I>+) `get_path_info` was moved from `django.core.handlers.base` to `django.core.handlers.wsgi` in commit <I>f. See <URL>
|
py
|
diff --git a/salt/returners/redis_return.py b/salt/returners/redis_return.py
index <HASH>..<HASH> 100644
--- a/salt/returners/redis_return.py
+++ b/salt/returners/redis_return.py
@@ -173,10 +173,17 @@ def get_fun(fun):
def get_jids():
'''
- Return a list of all job ids
+ Return a dict mapping all job ids to job information
'''
serv = _get_serv(ret=None)
- return list(serv.keys('load:*'))
+ ret = {}
+ for s in serv.mget(serv.keys('load:*')):
+ if s is None:
+ continue
+ load = json.loads(s)
+ jid = load['jid']
+ ret[jid] = salt.utils.jid.format_jid_instance(jid, load)
+ return ret
def get_minions():
|
Return a dict in `redis_return.get_jids`, which was fixed in #<I> but broken by #<I>
|
py
|
diff --git a/spacy/pattern/parser.py b/spacy/pattern/parser.py
index <HASH>..<HASH> 100644
--- a/spacy/pattern/parser.py
+++ b/spacy/pattern/parser.py
@@ -38,8 +38,18 @@ class PatternParser(object):
if not pattern.nodes:
return
+ cls.check_pattern(pattern)
return pattern
+ @staticmethod
+ def check_pattern(pattern):
+ if not pattern.is_connected():
+ raise ValueError("The pattern tree must be a fully connected "
+ "graph.")
+
+ if pattern.root_node is None:
+ raise ValueError("The root node of the tree could not be found.")
+
@classmethod
def _parse_line(cls, stream, pattern, lineno):
while not stream.closed:
|
Check in PatternParser that the generated Pattern is valid
|
py
|
diff --git a/tests/unit/utils/test_ssdp.py b/tests/unit/utils/test_ssdp.py
index <HASH>..<HASH> 100644
--- a/tests/unit/utils/test_ssdp.py
+++ b/tests/unit/utils/test_ssdp.py
@@ -180,7 +180,7 @@ class SSDPFactoryTestCase(TestCase):
assert factory.log.debug.called
assert 'Permission error' in factory.log.debug.mock_calls[0][1][0]
- def test_datagram_received_bad_signature(self):
+ def test_datagram_signature_bad(self):
'''
Test datagram_received on bad signature
|
Rename test to a more meaningful name
|
py
|
diff --git a/esgfpid/assistant/unpublish.py b/esgfpid/assistant/unpublish.py
index <HASH>..<HASH> 100644
--- a/esgfpid/assistant/unpublish.py
+++ b/esgfpid/assistant/unpublish.py
@@ -27,7 +27,7 @@ class AssistantOneVersion(object):
if handle and version_number:
self.__both_given(handle, version_number)
- loginfo(LOGGER, 'Requesting to unpublish version %i of dataset %s from %s (handle: %s).', version_number, self._drs_id, self._data_node, handle)
+ loginfo(LOGGER, 'Requesting to unpublish version %s of dataset %s from %s (handle: %s).', version_number, self._drs_id, self._data_node, handle)
elif handle:
self.__only_handle_given(handle)
loginfo(LOGGER, 'Requesting to unpublish a version of dataset %s from %s (handle: %s).', self._drs_id, self._data_node, handle)
|
Fixed a log message (expected number, but should be able to handle strings too).
|
py
|
diff --git a/galpy/df/quasiisothermaldf.py b/galpy/df/quasiisothermaldf.py
index <HASH>..<HASH> 100644
--- a/galpy/df/quasiisothermaldf.py
+++ b/galpy/df/quasiisothermaldf.py
@@ -1720,7 +1720,7 @@ class quasiisothermaldf(df):
"""
#initialize output array
- coord_v = numpy.array((numpy.size(R), 5))
+ coord_v = numpy.empty((numpy.size(R), 5))
#separate the coodinates into outliers and normal points.
# get the standard deviation and mean of R and z
Rz_set = numpy.stack((R,z), axis = 1)
|
fix bug with initializing empty array in sampleV interolate
|
py
|
diff --git a/stab.py b/stab.py
index <HASH>..<HASH> 100755
--- a/stab.py
+++ b/stab.py
@@ -27,14 +27,10 @@ def extract(fpath):
meta, content, first_line, meta_parsed = [], [], True, False
with open(fpath) as fp:
for line in fp:
- if line.strip() == '---' and not meta_parsed:
- if not first_line:
- meta_parsed = True
- first_line = False
- elif not meta_parsed:
- meta.append(line)
- else:
- content.append(line)
+ if line.strip() == '---' and first_line: first_line = False
+ elif line.strip() == '---' and not first_line and not meta_parsed: meta_parsed = True
+ elif not meta_parsed: meta.append(line)
+ else: content.append(line)
try:
return yaml.load('\n'.join(meta)), '\n'.join(content)
except:
|
Down to <I> lines :P :) :D
|
py
|
diff --git a/multilingual_tags/models.py b/multilingual_tags/models.py
index <HASH>..<HASH> 100644
--- a/multilingual_tags/models.py
+++ b/multilingual_tags/models.py
@@ -55,3 +55,6 @@ class TaggedItem(models.Model):
def __unicode__(self):
return '{0}: #{1}'.format(self.object, self.tag)
+
+ class Meta:
+ unique_together = ('content_type', 'object_id', 'tag')
|
Made Tag and the tagged object unique together.
|
py
|
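With the new `Meta.unique_together`, the database rejects a second identical tagging. A sketch of the effect (assumes a configured Django project; `ct`, `obj` and `tag` stand for existing fixture objects):

```python
from django.db import IntegrityError

# ct, obj and tag are assumed fixtures (a ContentType, a tagged object, a Tag)
TaggedItem.objects.create(content_type=ct, object_id=obj.pk, tag=tag)
try:
    TaggedItem.objects.create(content_type=ct, object_id=obj.pk, tag=tag)
except IntegrityError:
    print("duplicate (content_type, object_id, tag) rejected")
```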
diff --git a/mutmut/__main__.py b/mutmut/__main__.py
index <HASH>..<HASH> 100644
--- a/mutmut/__main__.py
+++ b/mutmut/__main__.py
@@ -560,6 +560,7 @@ def run_mutation(config: Config, filename: str, mutation_id: MutationID, callbac
return cached_status
if hasattr(mutmut_config, 'pre_mutation'):
+ context.current_line_index = context.mutation_id.line_number
mutmut_config.pre_mutation(context=context)
config = context.config
if context.skip:
|
Nasty bug where context.current_source_line could be wrong
|
py
|
diff --git a/records.py b/records.py
index <HASH>..<HASH> 100644
--- a/records.py
+++ b/records.py
@@ -201,9 +201,11 @@ class RecordCollection(object):
return self.all(as_dict=not(ordered), as_ordereddict=ordered)
def first(self, default=None, as_dict=False, as_ordereddict=False):
- """Returns a single record for the RecordCollection, or `default`."""
+ """Returns a single record for the RecordCollection, or `default`. If
+ `default` is an instance or subclass of Exception, then raise it
+ instead of returning it."""
- # Try to get a record, or return default.
+ # Try to get a record, or return/raise default.
try:
record = self[0]
except IndexError:
|
Update docstring/comment re: raising default
|
py
|
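The documented contract (return `default`, unless it is an exception, in which case raise it) can be sketched independently of RecordCollection:

```python
def first(records, default=None):
    try:
        return records[0]
    except IndexError:
        is_exc = isinstance(default, Exception) or (
            isinstance(default, type) and issubclass(default, Exception))
        if is_exc:
            raise default
        return default

assert first([]) is None
assert first([], default=0) == 0
# first([], default=ValueError('empty')) raises ValueError('empty')
```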
diff --git a/f5/__init__.py b/f5/__init__.py
index <HASH>..<HASH> 100644
--- a/f5/__init__.py
+++ b/f5/__init__.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-__version__ = '2.0.2'
+__version__ = '2.1.0'
|
Bumping release to new version <I>
|
py
|
diff --git a/legos/xkcd.py b/legos/xkcd.py
index <HASH>..<HASH> 100644
--- a/legos/xkcd.py
+++ b/legos/xkcd.py
@@ -1,6 +1,5 @@
from Legobot.Lego import Lego
import requests
-import re
import logging
import json
import random
@@ -75,7 +74,7 @@ class XKCD(Lego):
if latest.status_code == requests.codes.ok:
latest_json = latest.text
latest_json = json.loads(latest_json)
- comic_id = random.randint(1,latest_json['num'])
+ comic_id = random.randint(1, latest_json['num'])
else:
logger.error('Requests encountered an error.')
logger.error('HTTP GET response code: %s' % latest.status_code)
|
Just call me Mr. Clean cleaning up code to pass tests.
|
py
|
diff --git a/examples/carla/models.py b/examples/carla/models.py
index <HASH>..<HASH> 100644
--- a/examples/carla/models.py
+++ b/examples/carla/models.py
@@ -20,7 +20,7 @@ class CarlaModel(Model):
further fully connected layers.
"""
- def _init(self, inputs, num_outputs, options):
+ def _build_layers(self, inputs, num_outputs, options):
# Parse options
image_shape = options["custom_options"]["image_shape"]
convs = options.get("conv_filters", [
|
[rllib] _init renamed to _build_layers in example
|
py
|
diff --git a/sharpy/parsers.py b/sharpy/parsers.py
index <HASH>..<HASH> 100644
--- a/sharpy/parsers.py
+++ b/sharpy/parsers.py
@@ -255,9 +255,9 @@ class CustomersParser(CheddarOutputParser):
def parse_invoices(self, invoices_element):
invoices = []
-
- for invoice_element in invoices_element:
- invoices.append(self.parse_invoice(invoice_element))
+ if invoices_element:
+ for invoice_element in invoices_element:
+ invoices.append(self.parse_invoice(invoice_element))
return invoices
|
Fixed issue 2: Empty invoice elements no longer cause errors.
|
py
|
diff --git a/hydpy/core/variabletools.py b/hydpy/core/variabletools.py
index <HASH>..<HASH> 100644
--- a/hydpy/core/variabletools.py
+++ b/hydpy/core/variabletools.py
@@ -1837,8 +1837,8 @@ has been determined, which is not a submask of `Soil([ True, True, False])`.
except BaseException:
objecttools.augment_excmessage(
f"While trying to {description} variable "
- f"{objecttools.devicephrase(self)} and "
- f"`{type(other).__name__}` instance `{other}`"
+ f"{objecttools.devicephrase(self)} and `{type(other).__name__}` "
+ f"instance `{objecttools.repr_(other)}`"
)
def __add__(self, other):
|
Make an exception message of class `Variable` of module `variabletools` related to math operations compatible with doctesting.
|
py
|
diff --git a/asciidoc_reader/asciidoc_reader.py b/asciidoc_reader/asciidoc_reader.py
index <HASH>..<HASH> 100644
--- a/asciidoc_reader/asciidoc_reader.py
+++ b/asciidoc_reader/asciidoc_reader.py
@@ -60,11 +60,11 @@ class AsciiDocReader(BaseReader):
logger.debug('AsciiDocReader: Reading: %s', source_path)
optlist = self.settings.get('ASCIIDOC_OPTIONS', []) + self.default_options
options = " ".join(optlist)
- content = call("%s %s -o - '%s'" % (cmd, options, source_path))
+ content = call("%s %s -o - \"%s\"" % (cmd, options, source_path))
# Beware! # Don't use tempfile.NamedTemporaryFile under Windows: https://bugs.python.org/issue14243
# Also, use mkstemp correctly (Linux and Windows): https://www.logilab.org/blogentry/17873
fd, temp_name = tempfile.mkstemp()
- content = call("%s %s -o %s '%s'" % (cmd, options, temp_name, source_path))
+ content = call("%s %s -o %s \"%s\"" % (cmd, options, temp_name, source_path))
with open(temp_name, encoding='utf-8') as f:
content = f.read()
os.close(fd)
|
[asciidoc_reader]: Rehandle file names with spaces Fixes #<I>
|
py
|
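Swapping single quotes for double quotes fixes names with spaces but still breaks on names containing a double quote; passing an argument list to the subprocess module sidesteps shell quoting entirely. A sketch (hypothetical command and file name), if the `call` helper can be bypassed:

```python
import subprocess

# argument lists need no shell quoting, whatever the file name contains
argv = ["asciidoc", "-o", "-", 'my "draft" post.adoc']
content = subprocess.run(argv, capture_output=True, text=True, check=True).stdout
```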
diff --git a/uproot_methods/convert.py b/uproot_methods/convert.py
index <HASH>..<HASH> 100644
--- a/uproot_methods/convert.py
+++ b/uproot_methods/convert.py
@@ -61,10 +61,7 @@ def towriteable(obj):
elif any(x == ("physt.histogram1d", "Histogram1D") for x in types(obj.__class__, obj)):
return ("uproot_methods.classes.TH1", "from_physt", "uproot.write.objects.TH1", "TH1")
- elif any(x == ("uproot_methods.classes.TH1", "Methods") for x in types(obj.__class__, obj)):
- return (None, None, "uproot.write.objects.TH1", "TH1")
-
- elif any(x == ("TH1", "Methods") for x in types(obj.__class__, obj)):
+ elif any(x == ("uproot_methods.classes.TH1", "Methods") or x == ("TH1", "Methods") for x in types(obj.__class__, obj)):
return (None, None, "uproot.write.objects.TH1", "TH1")
else:
|
Write histograms from ROOT files read by uproot
|
py
|
diff --git a/pyclustering/cluster/tests/kmedoids_templates.py b/pyclustering/cluster/tests/kmedoids_templates.py
index <HASH>..<HASH> 100755
--- a/pyclustering/cluster/tests/kmedoids_templates.py
+++ b/pyclustering/cluster/tests/kmedoids_templates.py
@@ -75,7 +75,7 @@ class KmedoidsTestTemplates:
initial_medoids.append(index_point);
- kmedoids_instance = kmedoids(data, initial_medoids, 0.025, ccore = True);
+ kmedoids_instance = kmedoids(data, initial_medoids, 0.025, ccore = ccore_flag);
kmedoids_instance.process();
clusters = kmedoids_instance.get_clusters();
|
#<I>: Template for unit-tests for the bug.
|
py
|
diff --git a/python/ccxt/base/exchange.py b/python/ccxt/base/exchange.py
index <HASH>..<HASH> 100644
--- a/python/ccxt/base/exchange.py
+++ b/python/ccxt/base/exchange.py
@@ -1067,11 +1067,8 @@ class Exchange(object):
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
- if algorithm == 'keccak':
- binary = bytes(Exchange.web3.sha3(request))
- else:
- h = hashlib.new(algorithm, request)
- binary = h.digest()
+ h = hashlib.new(algorithm, request)
+ binary = h.digest()
if digest == 'base64':
return Exchange.encode(Exchange.binary_to_base64(binary))
elif digest == 'hex':
|
ignore keccak for hmac for now
|
py
|
diff --git a/gwpy/io/ligolw.py b/gwpy/io/ligolw.py
index <HASH>..<HASH> 100644
--- a/gwpy/io/ligolw.py
+++ b/gwpy/io/ligolw.py
@@ -533,10 +533,16 @@ def write_tables(target, tables, append=False, overwrite=False, **kwargs):
# write file
if isinstance(target, str):
- kwargs.setdefault('compress', target.endswith('.gz'))
+ kwargs.setdefault(
+ 'compress',
+ "gz" if target.endswith('.gz') else False,
+ )
ligolw_utils.write_filename(xmldoc, target, **kwargs)
elif isinstance(target, FILE_LIKE):
- kwargs.setdefault('compress', target.name.endswith('.gz'))
+ kwargs.setdefault(
+ 'compress',
+ "gz" if target.name.endswith('.gz') else False,
+ )
ligolw_utils.write_fileobj(xmldoc, target, **kwargs)
|
compress kwarg takes either 'gz' or False
|
py
|
diff --git a/tests/connection_test.py b/tests/connection_test.py
index <HASH>..<HASH> 100644
--- a/tests/connection_test.py
+++ b/tests/connection_test.py
@@ -113,9 +113,12 @@ class ConnectionTest(BaseTest):
def test_closed_connection_with_none_reader(self):
address = ('localhost', self.redis_port)
conn = yield from self.create_connection(address, loop=self.loop)
+ stored_reader = conn._reader
conn._reader = None
with self.assertRaises(ConnectionClosedError):
yield from conn.execute('blpop', 'test', 0)
+ conn._reader = stored_reader
+ conn.close()
@run_until_complete
def test_auth(self):
|
[#<I>] Clean mocked connection after test
|
py
|
diff --git a/flask_fedora_commons/__init__.py b/flask_fedora_commons/__init__.py
index <HASH>..<HASH> 100644
--- a/flask_fedora_commons/__init__.py
+++ b/flask_fedora_commons/__init__.py
@@ -383,7 +383,7 @@ class Repository(object):
self.create(entity_id)
sparql_template = Template("""$prefix
INSERT DATA {
- <$entity> $prop_uri $value_str;
+ <$entity> $prop_uri $value_str ;
}""")
sparql = sparql_template.substitute(
prefix=build_prefixes(self.namespaces),
@@ -395,7 +395,12 @@ class Repository(object):
data=sparql.encode(),
method='PATCH',
headers={'Content-Type': 'application/sparql-update'})
- response = urllib.request.urlopen(update_request)
+ try:
+ response = urllib.request.urlopen(update_request)
+ except urllib.error.HTTPError:
+ print("Error trying patch {}, sparql=\n{}".format(entity_uri,
+ sparql))
+ return False
if response.code < 400:
return True
return False
|
added error handling for Repository create method
|
py
|
diff --git a/airflow/www/views.py b/airflow/www/views.py
index <HASH>..<HASH> 100644
--- a/airflow/www/views.py
+++ b/airflow/www/views.py
@@ -271,7 +271,9 @@ def task_group_to_grid(task_item_or_group, dag, dag_runs, session):
def get_summary(dag_run, children):
child_instances = [child['instances'] for child in children if 'instances' in child]
- child_instances = [item for sublist in child_instances for item in sublist]
+ child_instances = [
+ item for sublist in child_instances for item in sublist if item['run_id'] == dag_run.run_id
+ ]
children_start_dates = [item['start_date'] for item in child_instances if item]
children_end_dates = [item['end_date'] for item in child_instances if item]
|
Check for run_id for grid group summaries (#<I>)
|
py
|
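The filtering pattern in isolation: flatten the children's instance lists but keep only instances belonging to the run being summarized. The toy data below assumes the dict shape visible in the diff:

children = [
    {"instances": [{"run_id": "a", "state": "success"},
                   {"run_id": "b", "state": "failed"}]},
    {"instances": [{"run_id": "a", "state": "running"}]},
]
run_id = "a"
child_instances = [
    item
    for child in children if "instances" in child
    for item in child["instances"]
    if item["run_id"] == run_id
]
print(child_instances)  # only the two run "a" instances survive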
diff --git a/asammdf/mdf.py b/asammdf/mdf.py
index <HASH>..<HASH> 100644
--- a/asammdf/mdf.py
+++ b/asammdf/mdf.py
@@ -3454,7 +3454,7 @@ class MDF(object):
].channel_group.comment = f"{message} 0x{msg_id:X}"
if ignore_invalid_signals:
- max_flags.append([])
+ max_flags.append([False])
for ch_index, sig in enumerate(sigs, 1):
max_flags[cg_nr].append(np.all(sig.invalidation_bits))
|
fix index error in extract_can method
|
py
|
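A plausible reading of the fix: channels are enumerated from 1, so position 0 of each per-group flag list needs an explicit False placeholder for later positional lookups to line up. Toy reconstruction of the shape (invalidation logic elided):

max_flags = []
cg_nr = 0
max_flags.append([False])  # placeholder at index 0 instead of an empty list
for ch_index, sig_all_invalid in enumerate([True, False], 1):
    max_flags[cg_nr].append(sig_all_invalid)
print(max_flags)  # [[False, True, False]]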
diff --git a/tests/test_search.py b/tests/test_search.py
index <HASH>..<HASH> 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -366,7 +366,7 @@ def test_we_can_search_with_change_history_field_gets_bugs():
]
}
- responses.add(responses.GET, 'https://bugzilla.mozilla.org/rest/bug?chfield=%5BBug+Creation%5D&chfield=Alias&chfieldvalue=foo&chfieldfrom=2014-12-01&chfieldto=2014-12-05&include_fields=version&include_fields=id&include_fields=summary&include_fields=status&include_fields=op_sys&include_fields=resolution&include_fields=product&include_fields=component&include_fields=platform',
+ responses.add(responses.GET, 'https://bugzilla.mozilla.org/rest/bug?chfield=%5BBug+creation%5D&chfield=Alias&chfieldvalue=foo&chfieldfrom=2014-12-01&chfieldto=2014-12-05&include_fields=version&include_fields=id&include_fields=summary&include_fields=status&include_fields=op_sys&include_fields=resolution&include_fields=product&include_fields=component&include_fields=platform',
body=json.dumps(return_1), status=200,
content_type='application/json', match_querystring=True)
|
Fix url in test to be valid
|
py
|
diff --git a/phy/utils/event.py b/phy/utils/event.py
index <HASH>..<HASH> 100644
--- a/phy/utils/event.py
+++ b/phy/utils/event.py
@@ -27,6 +27,9 @@ class EventEmitter(object):
"""
def __init__(self):
+ self.reset()
+
+ def reset(self):
self._callbacks = defaultdict(list)
def _get_on_name(self, func):
@@ -193,8 +196,11 @@ class ProgressReporter(EventEmitter):
def increment(self, **kwargs):
self._set_value(self._value + 1, **kwargs)
- def reset(self):
+ def reset(self, value_max=None):
+ super(ProgressReporter, self).reset()
self._value = 0
+ if value_max is not None:
+ self._value_max = value_max
@property
def value(self):
@@ -220,10 +226,9 @@ class ProgressReporter(EventEmitter):
"""Return wheter the task has completed."""
return self._value >= self._value_max
- def set_complete(self):
+ def set_complete(self, **kwargs):
"""Set the task as complete."""
- # self.value = self.value_max
- self._set_value(self.value_max)
+ self._set_value(self.value_max, **kwargs)
@property
def progress(self):
|
Added EventEmitter.reset() method.
|
py
|
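The pattern condensed into a runnable sketch: __init__ delegates to reset() so a subclass can re-initialize its own state and clear registered callbacks with a single super() call (simplified from the classes above):

from collections import defaultdict

class EventEmitter(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self._callbacks = defaultdict(list)

class ProgressReporter(EventEmitter):
    def reset(self, value_max=None):
        super(ProgressReporter, self).reset()  # also clears callbacks now
        self._value = 0
        if value_max is not None:
            self._value_max = value_max

pr = ProgressReporter()
pr.reset(value_max=10)
assert pr._value == 0 and pr._value_max == 10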
diff --git a/pyqode/python/frontend/modes/goto_assignements.py b/pyqode/python/frontend/modes/goto_assignements.py
index <HASH>..<HASH> 100644
--- a/pyqode/python/frontend/modes/goto_assignements.py
+++ b/pyqode/python/frontend/modes/goto_assignements.py
@@ -143,9 +143,9 @@ class GoToAssignmentsMode(Mode, QtCore.QObject):
return checked
def _on_results_available(self, status, definitions):
+ self._pending = False
if status:
self.editor.set_mouse_cursor(QtCore.Qt.IBeamCursor)
- self._pending = False
definitions = [Assignment(path, line, col, full_name)
for path, line, col, full_name in definitions]
definitions = self._unique(definitions)
@@ -158,7 +158,7 @@ class GoToAssignmentsMode(Mode, QtCore.QObject):
_logger().debug(
"More than 1 assignments in different modules, user "
"need to make a choice: %s" % definitions)
- def_str, result = QtGui.QInputDialog.getItem(
+ def_str, result = QtWidgets.QInputDialog.getItem(
self.editor, "Choose a definition",
"Choose the definition you want to go to:",
[str(d) for d in definitions])
|
Fix a potential bug where pending is never reset; also fix the call to QInputDialog
|
py
|
diff --git a/gpflow/transforms.py b/gpflow/transforms.py
index <HASH>..<HASH> 100644
--- a/gpflow/transforms.py
+++ b/gpflow/transforms.py
@@ -106,7 +106,7 @@ class Log1pe(Transform):
return tf.nn.softplus(x) + self._lower
def log_jacobian_tensor(self, x):
- return tf.negative(tf.reduce_sum(tf.log(1. + tf.exp(-x))))
+ return tf.negative(tf.reduce_sum(tf.nn.softplus(tf.negative(x))))
def backward(self, y):
"""
|
adding softplus as needed in transforms
|
py
|
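The identity behind the change: log(1 + e^(-x)) equals softplus(-x), and the dedicated softplus kernel avoids overflowing exp(-x) for large negative x. A NumPy check of the equivalence (tf.nn.softplus computes the same function):

import numpy as np

def softplus(x):
    return np.logaddexp(0.0, x)  # numerically stable log(1 + exp(x))

x = np.linspace(-10.0, 10.0, 5)
assert np.allclose(np.log(1.0 + np.exp(-x)), softplus(-x))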
diff --git a/rgcompare.py b/rgcompare.py
index <HASH>..<HASH> 100755
--- a/rgcompare.py
+++ b/rgcompare.py
@@ -183,9 +183,9 @@ def comparison_worker(identity, input, output):
for match_id, player_fnames, map_fname, turns in iter(input.get, 'STOP'):
map_data = ast.literal_eval(open(map_fname).read())
- game.init_settings(map_data)
- players = [game.Player(open(x).read()) for x in player_fnames]
- g = game.Game(*players)
+ settings.init_map(map_data)
+ players = [game.Player(x) for x in player_fnames]
+ g = game.Game(players)
t_start = time.clock()
for i in range(turns):
|
Update to work with latest rgkit (with my playername fix)
|
py
|
diff --git a/rows/plugins/postgresql.py b/rows/plugins/postgresql.py
index <HASH>..<HASH> 100644
--- a/rows/plugins/postgresql.py
+++ b/rows/plugins/postgresql.py
@@ -355,6 +355,11 @@ class PostgresCopy:
has_header=True,
callback=None,
):
+ # TODO: replace skip_header with skip_rows (and if > 0, consume the CSV
+ # before sending do psql's stdin)
+ if not has_header and skip_header:
+ raise ValueError("Cannot skip header when no header is available")
+
# Prepare the `psql` command to be executed based on collected metadata
command = get_psql_copy_command(
database_uri=self.database_uri,
@@ -379,9 +384,6 @@ class PostgresCopy:
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
- if skip_header and has_header:
- # Read the first line (which will be the header in most cases)
- fobj.readline()
data = fobj.read(self.chunk_size)
total_read, total_written = 0, 0
while data != b"":
|
Do not skip header manually (rely on psql)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,8 @@ if 'setuptools.extension' in sys.modules:
version_py = os.path.join(os.path.dirname(__file__), 'metaseq', 'version.py')
version = open(version_py).read().split('=')[-1].strip().replace('"','')
-exts = [Extension(
+exts = []
+[Extension(
'metaseq.rebin',
['metaseq/rebin.pyx'],
include_dirs=[numpy.get_include()])]
|
remove cython ext from setup.py
|
py
|
diff --git a/annex.py b/annex.py
index <HASH>..<HASH> 100644
--- a/annex.py
+++ b/annex.py
@@ -48,7 +48,6 @@ class Annex(object):
self.base_plugin = base_plugin
self.plugin_dirs = set()
- self.plugin_files = []
self.loaded_modules = {}
for plugin_dir in plugin_dirs:
@@ -57,8 +56,7 @@ class Annex(object):
else:
self.plugin_dirs.update(plugin_dir)
- self.plugin_files = self._get_plugin_files(self.plugin_dirs)
- for plugin_file in self.plugin_files:
+ for plugin_file in self._get_plugin_files(self.plugin_dirs):
self._load_plugin(plugin_file)
def __len__(self):
@@ -69,6 +67,12 @@ class Annex(object):
for plugin in modules.plugins:
yield plugin
+ def __getattr__(self, name):
+ for plugin in self:
+ if plugin.__class__.__name__ == name:
+ return plugin
+ raise AttributeError
+
def reload(self):
logger.debug("Reloading Plugins...")
|
Add __getattr__ to Annex for accessing plugins
|
py
|
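The lookup trick in a self-contained sketch: __getattr__ scans the loaded plugins and matches on class name, so annex.MyPlugin returns the MyPlugin instance (PluginBag below is an illustrative stand-in, not the real Annex):

class PluginBag(object):
    def __init__(self, plugins):
        self._plugins = list(plugins)

    def __iter__(self):
        return iter(self._plugins)

    def __getattr__(self, name):
        # Only invoked for attributes not found through normal lookup
        for plugin in self:
            if plugin.__class__.__name__ == name:
                return plugin
        raise AttributeError(name)

class Greeter(object):
    pass

bag = PluginBag([Greeter()])
assert isinstance(bag.Greeter, Greeter)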
diff --git a/inverse_covariance/quic_graph_lasso.py b/inverse_covariance/quic_graph_lasso.py
index <HASH>..<HASH> 100644
--- a/inverse_covariance/quic_graph_lasso.py
+++ b/inverse_covariance/quic_graph_lasso.py
@@ -637,17 +637,9 @@ class QuicGraphLassoEBIC(InverseCovarianceEstimator):
sample_covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated sample covariance matrix
- lam_scale_ : (float)
- Additional scaling factor on lambda (due to magnitude of
- sample_covariance_ values).
-
lam_ : (float)
Lambda chosen by EBIC (with scaling already applied).
- path_ : None or array of floats
- Sorted (largest to smallest) path. This will be None if not in path
- mode.
-
opt_ :
cputime_ :
|
Remove some attributes from comment to avoid confusion
|
py
|
diff --git a/shutit_module.py b/shutit_module.py
index <HASH>..<HASH> 100644
--- a/shutit_module.py
+++ b/shutit_module.py
@@ -23,10 +23,38 @@
from abc import ABCMeta, abstractmethod
import sys
import decimal
+import inspect
+
+def shutit_method_scope(func):
+ def wrapper(self, shutit):
+ return func(self, shutit)
+ return wrapper
class ShutItMeta(ABCMeta):
ShutItModule = None
def __new__(mcs, name, bases, local):
+
+ # Don't wrap methods of the ShutItModule class, only subclasses
+ if name != 'ShutItModule':
+
+ sim = mcs.ShutItModule
+ assert sim is not None
+
+ # Wrap any of the ShutItModule (self, shutit) methods that have been
+ # overridden in a subclass
+ for name, method in local.iteritems():
+ if not hasattr(sim, name):
+ continue
+ if not callable(method):
+ continue
+ sim_method = getattr(sim, name)
+ if sim_method is method:
+ continue
+ args = inspect.getargspec(sim_method)[0]
+ if args != ['self', 'shutit']:
+ continue
+ local[name] = shutit_method_scope(method)
+
cls = super(ShutItMeta, mcs).__new__(mcs, name, bases, local)
if name == 'ShutItModule':
mcs.ShutItModule = cls
|
Extend metaclass to wrap some methods in a nil decorator
|
py
|
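The wrapping idea reduced to its core, ported to Python 3 for a runnable sketch (the diff itself targets Python 2, hence iteritems and getargspec): a metaclass rebinds any subclass method that overrides a base-class method with the (self, shutit) signature through a pass-through decorator.

import inspect

def passthrough(func):
    def wrapper(self, shutit):
        return func(self, shutit)
    return wrapper

class WrappingMeta(type):
    def __new__(mcs, name, bases, local):
        if bases:  # wrap only subclasses, never the base class itself
            base = bases[0]
            for attr, method in list(local.items()):
                if not callable(method) or not hasattr(base, attr):
                    continue
                if inspect.getfullargspec(getattr(base, attr)).args == ['self', 'shutit']:
                    local[attr] = passthrough(method)
        return super().__new__(mcs, name, bases, local)

class Module(metaclass=WrappingMeta):
    def build(self, shutit):
        return 'base'

class MyModule(Module):
    def build(self, shutit):
        return 'sub'

assert MyModule().build(None) == 'sub'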
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,10 @@
import os
from setuptools import setup
+# Quick and dirty fix for http://bugs.python.org/issue8876
+if os.environ.get('USER','') == 'vagrant':
+ del os.link
+
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
|
Added Vagrant workaround to setup.py
|
py
|
diff --git a/yacron/job.py b/yacron/job.py
index <HASH>..<HASH> 100644
--- a/yacron/job.py
+++ b/yacron/job.py
@@ -185,8 +185,8 @@ class MailReporter(Reporter):
message = EmailMessage()
message.set_content(body)
message["From"] = mail["from"]
- message["To"] = mail["to"]
- message["Subject"] = subject
+ message["To"] = mail["to"].strip()
+ message["Subject"] = subject.strip()
message["Date"] = datetime.now(timezone.utc)
smtp = aiosmtplib.SMTP(
hostname=smtp_host, port=smtp_port, use_tls=mail["tls"]
|
email: strip the To and Subject header values of trailing whitespace. This is to avoid potentially including a trailing newline, which causes email formatting problems.
|
py
|
diff --git a/pyethereum/peermanager.py b/pyethereum/peermanager.py
index <HASH>..<HASH> 100644
--- a/pyethereum/peermanager.py
+++ b/pyethereum/peermanager.py
@@ -271,15 +271,11 @@ def peer_addresses_received_handler(sender, addresses, **kwargs):
peer_manager.save_peers()
-txfilter = SentFilter()
-
-
@receiver(signals.send_local_transactions)
def send_transactions(sender, transactions=[], **kwargs):
transactions = [rlp.decode(t.serialize()) for t in transactions]
for peer in peer_manager.connected_ethereum_peers:
- peer.send_Transactions([tx for tx in transactions
- if txfilter.add(tx.hex_serialize(), peer)])
+ peer.send_Transactions(transactions)
@receiver(signals.peer_handshake_success)
|
rm: tx filter
|
py
|
diff --git a/test/geocoders/photon.py b/test/geocoders/photon.py
index <HASH>..<HASH> 100644
--- a/test/geocoders/photon.py
+++ b/test/geocoders/photon.py
@@ -37,8 +37,13 @@ class TestPhoton(BaseTestGeocoder):
async def test_bbox(self):
await self.geocode_run(
- {"query": "Marbach", "bbox": [(50.16, 10.67), (50.17, 10.68)]},
- {"latitude": 50.1667628, "longitude": 10.6786321, "delta": 2.0},
+ {"query": "moscow"},
+ {"latitude": 55.7504461, "longitude": 37.6174943},
+ )
+ await self.geocode_run(
+ {"query": "moscow", # Idaho USA
+ "bbox": [[50.1, -130.1], [44.1, -100.9]]},
+ {"latitude": 46.7323875, "longitude": -117.0001651},
)
async def test_unicode_name(self):
|
Photon tests: ensure bbox affects the result (#<I>)
|
py
|
diff --git a/tests/test_projects.py b/tests/test_projects.py
index <HASH>..<HASH> 100644
--- a/tests/test_projects.py
+++ b/tests/test_projects.py
@@ -64,6 +64,20 @@ class TestProjects(unittest.TestCase):
payload={'name': 'PR 1', 'description': 'PR desc 1'}
)
+ @patch('taiga.models.IssueStatuses.create')
+ def test_add_issue_status(self, mock_new_issue_status):
+ rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
+ project = Project(rm, id=1)
+ project.add_issue_status('Issue 1')
+ mock_new_issue_status.assert_called_with(1, 'Issue 1')
+
+ @patch('taiga.models.IssueStatuses.list')
+ def test_list_issue_statuses(self, mock_list_issue_statuses):
+ rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
+ project = Project(rm, id=1)
+ project.list_issue_statuses()
+ mock_list_issue_statuses.assert_called_with(project=1)
+
@patch('taiga.models.Priorities.create')
def test_add_priority(self, mock_new_priority):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
|
Add tests for project issue statuses
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -1,10 +1,9 @@
from distutils.core import setup
from setuptools import find_packages
-from sendwithus.version import version
-setup (
+setup(
name='sendwithus',
- version=version,
+ version='1.0.5',
author='sendwithus',
author_email='[email protected]',
packages=find_packages(),
|
hardcode version to avoid trying to import requests at setup time
|
py
|
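The commit sidesteps the import-at-setup-time problem by hardcoding; a common alternative, sketched here under the assumption that sendwithus/version.py defines version as a string literal (this is not what the commit chose), is to parse the file as text so setup.py never imports the package and thus never pulls in requests:

import re

def read_version(path):
    # Parse `version = '...'` out of a version.py without importing it
    with open(path) as handle:
        match = re.search(r"version\s*=\s*['\"]([^'\"]+)['\"]", handle.read())
    if match is None:
        raise RuntimeError("version string not found in " + path)
    return match.group(1)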
diff --git a/ipyrad/load/load.py b/ipyrad/load/load.py
index <HASH>..<HASH> 100644
--- a/ipyrad/load/load.py
+++ b/ipyrad/load/load.py
@@ -274,8 +274,13 @@ def load_json(path, quiet=False):
sample_names = fullj["samples"].keys()
if not sample_names:
raise IPyradWarningExit("""
- No samples found in saved assembly. Try removing the assembly file.
+ No samples found in saved assembly. If you are just starting a new
+ assembly the file probably got saved erroneously, so it's safe to try
+ removing the assembly file and rerunning.
`rm {}`
+
+ If you fully completed step 1 and you see this message you should probably
+ contact the developers.
""".format(inpath))
sample_keys = fullj["samples"][sample_names[0]].keys()
|
more explicit error handling in the case of no samples in json
|
py
|
diff --git a/pystratum/RoutineLoaderHelper.py b/pystratum/RoutineLoaderHelper.py
index <HASH>..<HASH> 100644
--- a/pystratum/RoutineLoaderHelper.py
+++ b/pystratum/RoutineLoaderHelper.py
@@ -324,7 +324,7 @@ class RoutineLoaderHelper(metaclass=abc.ABCMeta):
"""
ret = True
- pattern = re.compile('(@[A-Za-z0-9_.]+(%type)?@)')
+ pattern = re.compile('(@[A-Za-z0-9_.]+(%(max-)?type)?@)')
matches = pattern.findall(self._routine_source_code)
placeholders = []
|
Placeholders for pseudo types with the maximum length of their base type.
|
py
|
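What the widened pattern accepts — plain placeholders, %type placeholders, and the new %max-type variant (the routine source below is illustrative, not real pystratum input):

import re

pattern = re.compile('(@[A-Za-z0-9_.]+(%(max-)?type)?@)')
source = "SELECT @p_id@, @tbl.col%type@, @tbl.col%max-type@"
print([match[0] for match in pattern.findall(source)])
# ['@p_id@', '@tbl.col%type@', '@tbl.col%max-type@']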