diff
stringlengths 139
3.65k
| message
stringlengths 8
627
| diff_languages
stringclasses 1
value |
---|---|---|
diff --git a/salt/grains/core.py b/salt/grains/core.py
index <HASH>..<HASH> 100644
--- a/salt/grains/core.py
+++ b/salt/grains/core.py
@@ -375,7 +375,7 @@ def _memdata(osdata):
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
if (osdata['kernel'] == 'NetBSD' and mem.startswith('-')):
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
- grains['mem_total'] = str(int(mem) / 1024 / 1024)
+ grains['mem_total'] = int(mem) / 1024 / 1024
elif osdata['kernel'] == 'SunOS':
prtconf = '/usr/sbin/prtconf 2>/dev/null'
for line in __salt__['cmd.run'](prtconf).splitlines():
|
mem_total in BSD should return as an int, see #<I>
|
py
|
diff --git a/setuptools/msvc.py b/setuptools/msvc.py
index <HASH>..<HASH> 100644
--- a/setuptools/msvc.py
+++ b/setuptools/msvc.py
@@ -233,10 +233,11 @@ def msvc14_library_dir_option(self, dir):
------
"\LIBPATH" argument: str
"""
- if ' ' in dir and '"' not in dir:
+ opt = unpatched['msvc14_library_dir_option'](self, dir)
+ if ' ' in opt and '"' not in opt:
# Quote if space and not already quoted
- dir = '"%s"' % dir
- return unpatched['msvc14_library_dir_option'](self, dir)
+ opt = '"%s"' % opt
+ return opt
def _augment_exception(exc, version, arch=''):
|
quote library_dir_option after calling unpatched version avoids double-quotes if the calling function does the quoting correctly.
|
py
|
diff --git a/tests/test_selectors.py b/tests/test_selectors.py
index <HASH>..<HASH> 100644
--- a/tests/test_selectors.py
+++ b/tests/test_selectors.py
@@ -16,16 +16,20 @@ class YapoAdInsertPage(sunbro.Page):
class TestSimplePageObject(unittest.TestCase):
+ def setUp(self):
+ self.driver = webdriver.Firefox()
+
+ def tearDown(self):
+ self.driver.quit()
+
def test_form_fill(self):
- driver = webdriver.Firefox()
- page = YapoAdInsertPage(driver)
+ page = YapoAdInsertPage(self.driver)
page.go()
page.subject.send_keys('Praise the sun, bros!')
page.body.send_keys('...to summon one another as'
' spirits, cross the gaps between'
' the worlds, and engage in jolly co-operation!')
page.price.send_keys('1231')
- driver.quit()
def test_get_selector(self):
page = YapoAdInsertPage(None)
|
setup/teardown for selector test
|
py
|
diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index <HASH>..<HASH> 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -646,7 +646,7 @@ class Trainer:
if iterator is not None:
iterator.write(output)
else:
- logger.info(output)
+ print(output)
def _prepare_inputs(
self, inputs: Dict[str, Union[torch.Tensor, Any]], model: nn.Module
|
Logs should not be hidden behind a logger.info (#<I>)
|
py
|
diff --git a/tests/bulk_properties.py b/tests/bulk_properties.py
index <HASH>..<HASH> 100755
--- a/tests/bulk_properties.py
+++ b/tests/bulk_properties.py
@@ -127,6 +127,10 @@ tests = [
[ dict( name='dia-C', struct=Diamond('C', size=[sx,sx,sx]),
Ec=7.370, a0=3.566, C11=1080, C12=130, C44=720 )
] ),
+ ( TabulatedEAM, dict(fn='Au_u3.eam'),
+ [ dict( name='fcc-Au', struct=FaceCenteredCubic('Au', size=[sx,sx,sx]),
+ Ec=3.93, a0=4.08, B=167, C11=183, C12=159, C44=45)
+ ] ),
( TabulatedAlloyEAM, dict(fn='Au-Grochola-JCP05.eam.alloy'),
[ dict( name='fcc-Au', struct=FaceCenteredCubic('Au', size=[sx,sx,sx]),
Ec=3.924, a0=4.070, C11=202, C12=170, C44=47, C440=46)
|
Testing: Added TabulatedEAM to bulk properties test.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,15 @@ SETUP = {
'author': "Ubuntu Developers",
'author_email': "[email protected]",
'url': "https://code.launchpad.net/charm-helpers",
+ 'install_requires': [
+ # Specify precise versions of runtime dependencies where possible.
+ 'netaddr==0.7.10', # trusty. precise is 0.7.5, but not in pypi.
+ 'PyYAML==3.10', # precise
+ 'Tempita==0.5.1', # precise
+ 'netifaces==0.10', # trusty is 0.8, but using py3 compatible version for tests.
+ 'Jinja2==2.6', # precise
+ 'six==1.1', # precise
+ ],
'packages': [
"charmhelpers",
"charmhelpers.cli",
|
Added install_requires section to setup.py to ensure dependencies are met when installing from pypi
|
py
|
diff --git a/raiden_contracts/utils/transaction.py b/raiden_contracts/utils/transaction.py
index <HASH>..<HASH> 100644
--- a/raiden_contracts/utils/transaction.py
+++ b/raiden_contracts/utils/transaction.py
@@ -3,6 +3,7 @@ from typing import Optional, Tuple
from hexbytes import HexBytes
from web3 import Web3
from web3._utils.threads import Timeout
+from web3.exceptions import TransactionNotFound
from web3.types import TxData, TxReceipt
@@ -36,11 +37,8 @@ def wait_for_transaction_receipt(
while not receipt or not receipt["blockNumber"]: # pylint: disable=E1136
try:
receipt = web3.eth.getTransactionReceipt(txid)
- except ValueError as ex:
- if str(ex).find("EmptyResponse") != -1:
- pass # Empty response from a Parity light client
- else:
- raise ex
+ except TransactionNotFound:
+ pass
time.sleep(5)
return receipt
|
Fix wait_for_transaction_receipt after web3 update
|
py
|
diff --git a/spinoff/tests/actor_test.py b/spinoff/tests/actor_test.py
index <HASH>..<HASH> 100644
--- a/spinoff/tests/actor_test.py
+++ b/spinoff/tests/actor_test.py
@@ -318,34 +318,6 @@ def test_actor_does_not_have_to_catch_actorstopped():
container.consume_message(('stopped', proc))
-def test_actor_must_exit_after_being_stopped():
- # actor that violates the rule
- @actor
- def X(self):
- while True:
- try:
- yield Deferred()
- except GeneratorExit:
- pass
-
- with contain(X) as (container, proc):
- proc.stop()
- container.consume_message(('stopped', proc, 'refused'),
- message="actor should not be allowed to continue working when stopped")
-
- # actor that complies with the rule
- @actor
- def Proc2(self):
- while True:
- try:
- yield Deferred()
- except GeneratorExit:
- break
- with contain(Proc2) as (container, proc2):
- proc2.stop()
- container.consume_message(('stopped', proc2))
-
-
def test_actor_with_args():
passed_values = [None, None]
|
Remove the actor_test:test_actor_must_exit_after_being_stopped obsolte (and failing) test
|
py
|
diff --git a/jira/version.py b/jira/version.py
index <HASH>..<HASH> 100644
--- a/jira/version.py
+++ b/jira/version.py
@@ -2,4 +2,4 @@
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into the jira module
-__version__ = '0.11'
+__version__ = '0.12'
|
Bump to version <I> for imminent release
|
py
|
diff --git a/synapse/queue.py b/synapse/queue.py
index <HASH>..<HASH> 100644
--- a/synapse/queue.py
+++ b/synapse/queue.py
@@ -110,12 +110,13 @@ class BulkQueue(EventBus):
return len(self.items)
def __iter__(self):
- try:
- while True:
- for i in self.get(timeout=1):
- yield i
- except QueueShutdown as e:
- pass
+ while True:
+ items = self.get(timeout=1)
+ if items == None:
+ return
+
+ for i in items:
+ yield i
def put(self, item):
'''
@@ -142,7 +143,12 @@ class BulkQueue(EventBus):
Example:
- for item in q.get():
+ items = q.get()
+ if items == None:
+ # the queue is shutdown
+ return
+
+ for item in items:
dostuff(item)
'''
@@ -167,19 +173,19 @@ class BulkQueue(EventBus):
return self._get_items()
if self.isfini:
- raise QueueShutdown()
+ return None
# Clear the event so we can wait...
self.event.clear()
self.event.wait(timeout=timeout)
if self.isfini:
- raise QueueShutdown()
+ return None
with self.lock:
self.last = time.time()
if not self.items and self.isfini:
- raise QueueShutdown()
+ return None
return self._get_items()
def peek(self):
|
updated queue API to return None on shutdown rather than exception
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,7 @@ requirements = [
# Protocol and data packages
"pytmpdir >= 0.2.3", # A temporary directory, useful for extracting archives to
"txhttputil >= 0.3.5", # Utility class for http requests
- "vortexpy >= 1.3.0", # Data serialisation and transport layer, observable based
+ "vortexpy >= 1.3.1", # Data serialisation and transport layer, observable based
# SOAP interface packages
"SOAPpy-py3 >= 0.52.24", # See http://soappy.ooz.ie for tutorials
|
PEEK-<I>: imp: Upgraded to "vortexpy >= <I>"
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python
"""Django/PostgreSQL implementation of the Meteor DDP service."""
-import os.path
from setuptools import setup, find_packages
CLASSIFIERS = [
|
Remove unused import (os.path) from setup.
|
py
|
diff --git a/tests/remotes/hdfs.py b/tests/remotes/hdfs.py
index <HASH>..<HASH> 100644
--- a/tests/remotes/hdfs.py
+++ b/tests/remotes/hdfs.py
@@ -2,7 +2,6 @@ import locale
import os
import platform
import subprocess
-import sys
import uuid
from contextlib import contextmanager
from pathlib import Path
@@ -74,10 +73,7 @@ class HDFS(Base, URLInfo): # pylint: disable=abstract-method
@pytest.fixture(scope="session")
def hadoop(test_config):
test_config.requires("real_hdfs")
-
- if sys.version_info >= (3, 10):
- pytest.skip("pyarrow is not available for 3.10 yet.")
-
+ pytest.importorskip("pyarrow.fs")
if platform.system() != "Linux":
pytest.skip("only supported on Linux")
@@ -280,9 +276,7 @@ def hdfs(test_config, mocker):
# "The pyarrow installation is not built with support for
# 'HadoopFileSystem'"
test_config.requires("hdfs")
-
- if sys.version_info >= (3, 10):
- pytest.skip("pyarrow is not available for 3.10 yet.")
+ pytest.importorskip("pyarrow.fs")
mocker.patch("pyarrow.fs._not_imported", [])
mocker.patch(
|
tests: skip based on conditional import on pyarrow From now on, pyarrow dependency is optional for the tests.
|
py
|
diff --git a/zengine/lib/forms.py b/zengine/lib/forms.py
index <HASH>..<HASH> 100644
--- a/zengine/lib/forms.py
+++ b/zengine/lib/forms.py
@@ -9,7 +9,7 @@ class JsonForm(Form):
def serialize(self):
result = {
"schema": {
- "title": self.Meta.title,
+ "title": self.title,
"type": "object",
"properties": {},
"required": []
|
fixes zetaops/ulakbus-ui#<I> the "form titles are wrong" issue.
|
py
|
diff --git a/bcbio/rnaseq/pizzly.py b/bcbio/rnaseq/pizzly.py
index <HASH>..<HASH> 100644
--- a/bcbio/rnaseq/pizzly.py
+++ b/bcbio/rnaseq/pizzly.py
@@ -15,6 +15,7 @@ from bcbio.distributed.transaction import file_transaction
from bcbio.rnaseq import kallisto, sailfish, gtf
from bcbio.provenance import do
from bcbio.utils import file_exists, safe_makedir
+from bcbio.bam import fasta
h5py = utils.LazyImport("h5py")
import numpy as np
@@ -42,6 +43,9 @@ def run_pizzly(data):
gtf_fa = dd.get_transcriptome_fasta(data)
else:
gtf_fa = sailfish.create_combined_fasta(data)
+ stripped_fa = os.path.splitext(os.path.basename(gtf_fa))[0] + "-noversions.fa"
+ stripped_fa = os.path.join(pizzlydir, stripped_fa)
+ gtf_fa = fasta.strip_transcript_versions(gtf_fa, stripped_fa)
fraglength = get_fragment_length(data)
cachefile = os.path.join(pizzlydir, "pizzly.cache")
fusions = kallisto.get_kallisto_fusions(data)
|
fix for pizzly not respecting transcript versions pizzly ignores transcript versions in the GTF file, so we need to remove them from any provided FASTA files, or else it won't be able to match them up. Thanks to Avinash Reddy for pointing out the issue.
|
py
|
diff --git a/holidays/countries/egypt.py b/holidays/countries/egypt.py
index <HASH>..<HASH> 100644
--- a/holidays/countries/egypt.py
+++ b/holidays/countries/egypt.py
@@ -111,9 +111,9 @@ class Egypt(HolidayBase):
for date_obs in get_gre_date(year, 12, 9):
hol_date = date_obs
self[hol_date] = "Arafat Day"
- self[hol_date + rd(days=1)] = "Eid al-Fitr"
- self[hol_date + rd(days=2)] = "Eid al-Fitr Holiday"
- self[hol_date + rd(days=3)] = "Eid al-Fitr Holiday"
+ self[hol_date + rd(days=1)] = "Eid al-Adha"
+ self[hol_date + rd(days=2)] = "Eid al-Adha Holiday"
+ self[hol_date + rd(days=3)] = "Eid al-Adha Holiday"
# Islamic New Year - (hijari_year, 1, 1)
for date_obs in get_gre_date(year, 1, 1):
|
Fix egypt typo - Aid Adha
|
py
|
diff --git a/anyconfig/mergeabledict.py b/anyconfig/mergeabledict.py
index <HASH>..<HASH> 100644
--- a/anyconfig/mergeabledict.py
+++ b/anyconfig/mergeabledict.py
@@ -283,10 +283,8 @@ def create_from(obj=None, ac_ordered=False,
class MergeableDict(dict):
+ """Dict based object supports 'merge' operation.
"""
- Dict based object supports 'merge' operation.
- """
-
strategy = MS_DICTS
def get_strategy(self):
|
refactor: remove an extra spaces
|
py
|
diff --git a/gspread/client.py b/gspread/client.py
index <HASH>..<HASH> 100644
--- a/gspread/client.py
+++ b/gspread/client.py
@@ -230,8 +230,17 @@ class Client(object):
def import_csv(self, file_id, data):
"""Imports data into the first page of the spreadsheet.
- :param data: A CSV string of data.
- :type data: str
+ :param str data: A CSV string of data.
+
+ Example:
+
+ .. code::
+
+ # Read CSV file contents
+ content = open('file_to_import.csv', 'r').read()
+
+ gc.import_csv(spreadsheet.id, content)
+
"""
headers = {'Content-Type': 'text/csv'}
url = '{0}/{1}'.format(DRIVE_FILES_UPLOAD_API_V2_URL, file_id)
|
Extend a docstring of `import_csv()` with an example usage
|
py
|
diff --git a/pythainlp/tokenize/crfcut.py b/pythainlp/tokenize/crfcut.py
index <HASH>..<HASH> 100644
--- a/pythainlp/tokenize/crfcut.py
+++ b/pythainlp/tokenize/crfcut.py
@@ -192,6 +192,7 @@ def segment(text: str) -> List[str]:
toks = word_tokenize(text)
feat = extract_features(toks)
labs = _tagger.tag(feat)
+ labs[-1] = 'E' #make sure it cuts the last sentence
sentences = []
sentence = ""
|
fix crfcut last segment not included if not predicted as end-of-sentence
|
py
|
diff --git a/parambokeh/__init__.py b/parambokeh/__init__.py
index <HASH>..<HASH> 100644
--- a/parambokeh/__init__.py
+++ b/parambokeh/__init__.py
@@ -114,7 +114,7 @@ class default_label_formatter(param.ParameterizedFunction):
if self.replace_underscores:
pname = pname.replace('_',' ')
if self.capitalize:
- pname = pname.capitalize()
+ pname = pname[:1].upper() + pname[1:]
return pname
|
Fixed label formatting to change case only of first letter
|
py
|
diff --git a/tabular_predDB/python_utils/sample_utils.py b/tabular_predDB/python_utils/sample_utils.py
index <HASH>..<HASH> 100644
--- a/tabular_predDB/python_utils/sample_utils.py
+++ b/tabular_predDB/python_utils/sample_utils.py
@@ -372,10 +372,15 @@ def simple_predictive_sample_unobserved(M_c, X_L, X_D, Y, query_row,
samples_list.append(this_sample_draws)
return samples_list
-def continuous_imputation(samples, get_next_seed):
+def continuous_imputation(samples, get_next_seed, return_confidence=False):
n_samples = len(samples)
mean_sample = sum(samples) / float(n_samples)
- return mean_sample
+ if return_confidence:
+ print "sample_utils.continuous_imputation: return_confidence not yet implemented"
+ condience = None
+ return mean_sample, None
+ else:
+ return mean_sample
def multinomial_imputation(samples, get_next_seed, return_confidence=False):
counter = Counter(samples)
|
continuous_imputation: take a return_and_confidence argument, but don't handle it
|
py
|
diff --git a/src/pip_shims/backports.py b/src/pip_shims/backports.py
index <HASH>..<HASH> 100644
--- a/src/pip_shims/backports.py
+++ b/src/pip_shims/backports.py
@@ -1159,7 +1159,6 @@ def resolve(
"use_user_site",
"isolated",
"use_user_site",
- "require_hashes",
]
resolver_args = {key: kwargs[key] for key in resolver_keys if key in kwargs}
if resolver_provider is None:
|
Don't pass unused args to the resolver
|
py
|
diff --git a/mbed_cloud/account_management.py b/mbed_cloud/account_management.py
index <HASH>..<HASH> 100644
--- a/mbed_cloud/account_management.py
+++ b/mbed_cloud/account_management.py
@@ -724,7 +724,7 @@ class Group(BaseObject):
"account_id": "account_id",
"name": "name",
"user_count": "user_count",
- "api_key_count": "api_key_count",
+ "apikey_count": "apikey_count",
"created_at": "created_at",
"creation_time": "creation_time",
"last_update_time": "last_update_time"
@@ -763,12 +763,12 @@ class Group(BaseObject):
return self._user_count
@property
- def api_key_count(self):
+ def apikey_count(self):
"""The number of API keys in this group. (readonly)
:rtype: int
"""
- return self._api_key_count
+ return self._apikey_count
@property
def created_at(self):
|
Fix apikey_count property.
|
py
|
diff --git a/odl/tomo/backends/stir_bindings.py b/odl/tomo/backends/stir_bindings.py
index <HASH>..<HASH> 100644
--- a/odl/tomo/backends/stir_bindings.py
+++ b/odl/tomo/backends/stir_bindings.py
@@ -44,6 +44,12 @@ from builtins import super
try:
import stir
+ # Fix for stirextra being moved around in various stir versions
+ try:
+ stirextra = stir.stirextra
+ except AttributeError:
+ import stirextra
+
STIR_AVAILABLE = True
except ImportError:
STIR_AVAILABLE = False
@@ -159,7 +165,7 @@ class ForwardProjectorByBinWrapper(Operator):
self.projector.forward_project(self.proj_data, self.volume)
# make odl data
- out[:] = stir.stirextra.to_numpy(self.proj_data)
+ out[:] = stirextra.to_numpy(self.proj_data)
@property
def adjoint(self):
@@ -256,7 +262,7 @@ class BackProjectorByBinWrapper(Operator):
self.back_projector.back_project(self.volume, self.proj_data)
# make odl data
- out[:] = stir.stirextra.to_numpy(self.volume)
+ out[:] = stirextra.to_numpy(self.volume)
def stir_projector_from_file(volume_file, projection_file):
|
BUG: fix bug with stirextra import in stir_bindings
|
py
|
diff --git a/doc/source/conf.py b/doc/source/conf.py
index <HASH>..<HASH> 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -23,7 +23,8 @@ import datetime
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax','sphinx.ext.ifconfig']
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax','sphinx.ext.ifconfig',
+ 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
|
Add viewdoc extension to link to the source code in the docs [ci skip]
|
py
|
diff --git a/devassistant/dapi/__init__.py b/devassistant/dapi/__init__.py
index <HASH>..<HASH> 100644
--- a/devassistant/dapi/__init__.py
+++ b/devassistant/dapi/__init__.py
@@ -15,6 +15,14 @@ from devassistant.exceptions import DapFileError, DapMetaError, DapInvalid
from devassistant.logger import logger
+class DapProblem(object):
+ '''Class denoting a problem with a DAP'''
+
+ def __init__(self, message, level=logging.ERROR):
+ self.message = message
+ self.level = level
+
+
class DapFormatter(object):
'''Formatter for different output information for the Dap class'''
|
Added the DapProblem class
|
py
|
diff --git a/src/python/sikuli.py b/src/python/sikuli.py
index <HASH>..<HASH> 100644
--- a/src/python/sikuli.py
+++ b/src/python/sikuli.py
@@ -117,12 +117,12 @@ class SikuliLibrary(object):
def _output_file(self):
outputDir = self._get_output_folder()
outputFile = 'Sikuli_java_stdout_'+str(time.time())+'.txt'
- return outputFile
+ return os.path.join(outputDir, outputFile)
def _err_file(self):
outputDir = self._get_output_folder()
errFile = 'Sikuli_java_stderr_'+str(time.time())+'.txt'
- return errFile
+ return os.path.join(outputDir, errFile)
def _get_output_folder(self):
outputDir = os.path.abspath(os.curdir)
|
stdout, stderr file of java process should be in OUTPUTDIR
|
py
|
diff --git a/Test.py b/Test.py
index <HASH>..<HASH> 100644
--- a/Test.py
+++ b/Test.py
@@ -136,6 +136,8 @@ class Widget:
self.children.remove(widget)
def add_stretch(self):
pass
+ def add_spacing(self, spacing):
+ pass
def create_layer(self):
return Widget()
def draw(self):
|
Add missing method for passing tests. svn r<I>
|
py
|
diff --git a/oinkwall/firewall.py b/oinkwall/firewall.py
index <HASH>..<HASH> 100644
--- a/oinkwall/firewall.py
+++ b/oinkwall/firewall.py
@@ -533,7 +533,6 @@ class HostsAllow:
def get_hosts_allow_content(self):
lines = []
- lines.append('# hosts.allow configuration written by oinkwall v%s' % __version__)
for rule in self.rules:
if 'comment' in rule:
lines.append("# %s" % rule['comment'])
|
Remove spam from generated hosts.allow The version number causes a lot of changes in a dry-run when oinkwall version changes, remove it. Remove the whole line, who cares.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,6 @@ PARAMS['classifiers'] = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries',
|
Removing python-<I> with the stabilization of <I>.
|
py
|
diff --git a/openupgradelib/openupgrade.py b/openupgradelib/openupgrade.py
index <HASH>..<HASH> 100644
--- a/openupgradelib/openupgrade.py
+++ b/openupgradelib/openupgrade.py
@@ -368,6 +368,8 @@ def rename_models(cr, model_spec):
functionality you will want to update references in for instance
relation fields.
+ WARNING: This method doesn't rename the associated tables. For that,
+ you need to call `rename_tables` method.
"""
for (old, new) in model_spec:
logger.info("model %s: renaming to %s",
|
[IMP] rename_models: Add warning on docstring For acknowledge of the need of calling rename_tables also.
|
py
|
diff --git a/src/rotest/core/runner.py b/src/rotest/core/runner.py
index <HASH>..<HASH> 100644
--- a/src/rotest/core/runner.py
+++ b/src/rotest/core/runner.py
@@ -137,9 +137,6 @@ def parse_config_file(json_path, schema_path=DEFAULT_SCHEMA_PATH):
config = parse(json_path=json_path,
schema_path=schema_path)
- if "outputs" in config:
- config.outputs = set(config.outputs)
-
return config
|
Removed some cast to set when parsing the config file.
|
py
|
diff --git a/thermo/utils.py b/thermo/utils.py
index <HASH>..<HASH> 100644
--- a/thermo/utils.py
+++ b/thermo/utils.py
@@ -2367,7 +2367,7 @@ class TDependentProperty(object):
prop : float
Calculated property, [`units`]
'''
- method = self.method
+ method = self._method
if method == POLY_FIT:
try: return self.calculate(T, POLY_FIT)
except: return None
|
Find and remove one unnecessary duplicate function call to make up for the extra function call added in extrapolate of TDependentProperty
|
py
|
diff --git a/PySimpleGUI.py b/PySimpleGUI.py
index <HASH>..<HASH> 100644
--- a/PySimpleGUI.py
+++ b/PySimpleGUI.py
@@ -542,7 +542,7 @@ class InputText(Element):
def Update(self, value=None, disabled=None, select=None, visible=None):
if disabled is True:
- self.TKEntry['state'] = 'disabled'
+ self.TKEntry['state'] = 'readonly'
elif disabled is False:
self.TKEntry['state'] = 'normal'
if value is not None:
@@ -4944,7 +4944,7 @@ def PackFormIntoFrame(form, containing_frame, toplevel_form):
focus_set = True
element.TKEntry.focus_set()
if element.Disabled:
- element.TKEntry['state'] = 'disabled'
+ element.TKEntry['state'] = 'readonly'
if element.Tooltip is not None:
element.TooltipObject = ToolTip(element.TKEntry, text=element.Tooltip, timeout=DEFAULT_TOOLTIP_TIME)
if element.RightClickMenu or toplevel_form.RightClickMenu:
|
Changed disabled InputText from disabled to readonly
|
py
|
diff --git a/requests_oauthlib/oauth2_session.py b/requests_oauthlib/oauth2_session.py
index <HASH>..<HASH> 100644
--- a/requests_oauthlib/oauth2_session.py
+++ b/requests_oauthlib/oauth2_session.py
@@ -197,7 +197,8 @@ class OAuth2Session(requests.Session):
redirect_uri=self.redirect_uri, username=username,
password=password, **kwargs)
- auth = auth or requests.auth.HTTPBasicAuth(username, password)
+ if not auth and (username):
+ auth = auth or requests.auth.HTTPBasicAuth(username, password)
headers = headers or {
'Accept': 'application/json',
|
Preventing sending Basic Auth headers with "None:None" Closes #<I>
|
py
|
diff --git a/compiler/doc/json.py b/compiler/doc/json.py
index <HASH>..<HASH> 100644
--- a/compiler/doc/json.py
+++ b/compiler/doc/json.py
@@ -134,6 +134,8 @@ class Component(object):
package, name = self.package, self.name
r.append('{' )
r.append('\t"name": "%s.%s",' %(package, name))
+ comp = self.component
+ r.append('\t"text": "%s",' %(comp.doc.text if hasattr(comp, "doc") and hasattr(comp.doc, "text") else ""))
r.append('')
r.append('\t"content": {')
self.process_children(r, package)
|
Add docs for root component.
|
py
|
diff --git a/pygccxml/parser/declarations_cache.py b/pygccxml/parser/declarations_cache.py
index <HASH>..<HASH> 100644
--- a/pygccxml/parser/declarations_cache.py
+++ b/pygccxml/parser/declarations_cache.py
@@ -137,7 +137,7 @@ class file_cache_t( cache_base_t ):
cache_base_t.__init__( self )
self.__name = name
self.__cache = self.__load( self.__name )
- self.__needs_flushed = bool( self.__cache ) # If empty then we need to flush
+ self.__needs_flushed = not bool( self.__cache ) # If empty then we need to flush
for entry in self.__cache.itervalues(): # Clear hit flags
entry.was_hit = False
|
removing unnecessary cache flushing ( fix from Allen )
|
py
|
diff --git a/lib/parseinput.py b/lib/parseinput.py
index <HASH>..<HASH> 100644
--- a/lib/parseinput.py
+++ b/lib/parseinput.py
@@ -2,8 +2,11 @@
# Program: parseinput.py
# Author: Christopher Hanley
# History:
-# Version 0.1, 11/02/2004: Initial Creation -- CJH
-__version__ = '0.1.1 (12/06/2004)'
+# Version 0.1, 11/02/2004: Initial Creation -- CJH
+# Version 0.1.2 01/10/2005: Removed the appending of "_drz.fits" to extracted
+# file names. -- CJH
+
+__version__ = '0.1.2 (01/10/2005)'
__author__ = 'Christopher Hanley'
# irafglob provides the ability to recursively parse user input that
@@ -86,7 +89,7 @@ def parseinput(inputlist,outputname=None):
# Extract the output name from the association table if None
# was provided on input.
if outputname == None:
- newoutputname = assocdict['output']+'_drz.fits'
+ newoutputname = assocdict['output']
# Loop over the association dictionary to extract the input
# file names.
|
<I>/<I>/<I> -- CJH -- Removed the append of "_drz.fits" to output names extracted from an input assocation table. It is up to the calling program to determine the appropriate final from of the output name. Treat the output name returned from this function as a file's rootname. git-svn-id: <URL>
|
py
|
diff --git a/simuvex/s_arch.py b/simuvex/s_arch.py
index <HASH>..<HASH> 100755
--- a/simuvex/s_arch.py
+++ b/simuvex/s_arch.py
@@ -772,6 +772,7 @@ class SimPPC64(SimArch):
self.ret_instruction = "\x4e\x80\x00\x20"
self.nop_instruction = "\x60\x00\x00\x00"
self.instruction_alignment = 4
+ self.persistant_regs = [ 'toc' ]
if endness == "Iend_LE":
self.function_prologs = {
@@ -878,6 +879,19 @@ class SimPPC64(SimArch):
self.ret_instruction = self.ret_instruction[::-1]
self.nop_instruction = self.nop_instruction[::-1]
+ def gather_info_from_state(self, state):
+ info = {}
+ for reg in self.persistent_regs:
+ info[reg] = state.reg_expr(reg)
+ return info
+
+ def prepare_state(self, state, info=None):
+ if info is not None:
+ if 'toc' in info:
+ state.store_reg('r2', info['toc'])
+
+ return state
+
Architectures = { }
Architectures["AMD64"] = SimAMD64
Architectures["X86"] = SimX86
|
Add rules for preserving r2 in ppc<I>
|
py
|
diff --git a/pygrok/pygrok.py b/pygrok/pygrok.py
index <HASH>..<HASH> 100644
--- a/pygrok/pygrok.py
+++ b/pygrok/pygrok.py
@@ -70,7 +70,7 @@ def grok_match(text, pattern, custom_patterns = {}, custom_patterns_dir = None):
matches[key] = int(match)
if type_mapper[key] == 'float':
matches[key] = float(match)
- except KeyError:
+ except (TypeError, KeyError) as e:
pass
return matches
|
Taking incompatible input into account...
|
py
|
diff --git a/efopen/ef_service_registry.py b/efopen/ef_service_registry.py
index <HASH>..<HASH> 100644
--- a/efopen/ef_service_registry.py
+++ b/efopen/ef_service_registry.py
@@ -19,7 +19,7 @@ import subprocess
from collections import Counter
from os.path import isfile, normpath
-from efopen.ef_config import EFConfig
+from ef_config import EFConfig
class EFServiceRegistry(object):
|
revert the attempt to make the world better (#<I>)
|
py
|
diff --git a/tests/test_plowshare.py b/tests/test_plowshare.py
index <HASH>..<HASH> 100644
--- a/tests/test_plowshare.py
+++ b/tests/test_plowshare.py
@@ -24,7 +24,7 @@
# SOFTWARE.
import pytest
-from plowshare import Plowshare
+from plowshare.plowshare import Plowshare
# Fixtures
|
Import plowshare from proper location
|
py
|
diff --git a/pyte/compiler.py b/pyte/compiler.py
index <HASH>..<HASH> 100644
--- a/pyte/compiler.py
+++ b/pyte/compiler.py
@@ -8,7 +8,6 @@ import io
import sys
import types
import warnings
-from io import BytesIO
from typing import Any, Tuple
from pyte import tokens, util
@@ -24,7 +23,7 @@ def compile_bytecode(code: list) -> bytes:
:param code: A list of objects to compile.
:return: The computed bytecode.
"""
- bc = BytesIO()
+ bc = b""
for i, op in enumerate(code):
try:
# Get the bytecode.
@@ -36,13 +35,12 @@ def compile_bytecode(code: list) -> bytes:
bc_op = op
else:
raise CompileError("Could not compile code of type {}".format(type(op)))
- # Append it
- bc.write(bc_op)
+ bc += bc_op
except Exception as e:
print("Fatal compiliation error on operator {i} ({op}).".format(i=i, op=op))
raise e
- return bc.getvalue()
+ return bc
# TODO: Backport to <3.3
|
Revert usage of BytesIO for now.
|
py
|
diff --git a/backdrop/collector/write.py b/backdrop/collector/write.py
index <HASH>..<HASH> 100644
--- a/backdrop/collector/write.py
+++ b/backdrop/collector/write.py
@@ -2,16 +2,17 @@ import requests
class Backdrop(object):
- def __init__(self, location):
- self.location = location
+ def __init__(self, url, token):
+ self.url = url
+ self.token = token
- def post(self, contents, bucket, token):
+ def post(self, contents):
headers = {
- "Authorization": token,
+ "Authorization": "Bearer %s" % self.token,
"Content-type": "application/json"
}
requests.post(
- url=self.location + "/%s" % bucket,
+ url=self.url,
headers=headers,
data=contents
)
|
Match write api to what is being used in collectors
|
py
|
diff --git a/tests/utils/helpers.py b/tests/utils/helpers.py
index <HASH>..<HASH> 100644
--- a/tests/utils/helpers.py
+++ b/tests/utils/helpers.py
@@ -910,3 +910,31 @@ def random_location_generator(min_x=-180, max_x=180, min_y=-90, max_y=90):
return shapely.geometry.Point(
(min_x + random.random() * (max_x - min_x),
min_y + random.random() * (max_y - min_y)))
+
+
+class MultiMock(object):
+
+ def __init__(self, **mocks):
+ # dict of mock names -> mock paths
+ self._mocks = mocks
+ self.active_patches = {}
+ self.active_mocks = {}
+
+ def __enter__(self):
+ for key, value in self._mocks.iteritems():
+ the_patch = mock_module.patch(value)
+ self.active_patches[key] = the_patch
+ self.active_mocks[key] = the_patch.start()
+ return self
+
+ def __exit__(self, *args):
+ for each_mock in self.active_mocks.itervalues():
+ each_mock.stop()
+ for each_patch in self.active_patches.itervalues():
+ each_patch.stop()
+
+ def __iter__(self):
+ return self.active_mocks.itervalues()
+
+ def __getitem__(self, key):
+ return self.active_mocks.get(key)
|
tests/utils/helpers: Added `MultiMock` class to help handle multiple mock objects.
|
py
|
diff --git a/cumulusci/tasks/robotframework/debugger/ui.py b/cumulusci/tasks/robotframework/debugger/ui.py
index <HASH>..<HASH> 100644
--- a/cumulusci/tasks/robotframework/debugger/ui.py
+++ b/cumulusci/tasks/robotframework/debugger/ui.py
@@ -3,6 +3,7 @@ import textwrap
import os
import sys
import re
+import pdb
from robot.libraries.BuiltIn import BuiltIn
from cumulusci.cli.ui import CliTable
from selenium.common.exceptions import InvalidSelectorException
@@ -84,9 +85,8 @@ class DebuggerCli(cmd.Cmd):
The context will be this function, which won't be particularly
useful. This is mostly for debugging the debug code. How meta!
"""
- import pdb
- pdb.Pdb(stdout=sys.__stdout__).set_trace()
+ pdb.Pdb(stdout=self.stdout).set_trace()
def do_reset_elements(self, arg):
"""Remove all highlighting added by `locate_elements`"""
@@ -98,7 +98,7 @@ class DebuggerCli(cmd.Cmd):
"""
Execute a robot framework keyword. (shortcut: !)
- The statement should be formatted just line in a .robot
+ The statement should be formatted just like in a .robot
file, with two spaces between each argument.
Example:
|
tweak how pdb is started in the debugger 'pdb' command. Also fixed a typo in a docstring.
|
py
|
diff --git a/acceptancetests/jujupy/k8s_provider/aks.py b/acceptancetests/jujupy/k8s_provider/aks.py
index <HASH>..<HASH> 100644
--- a/acceptancetests/jujupy/k8s_provider/aks.py
+++ b/acceptancetests/jujupy/k8s_provider/aks.py
@@ -137,7 +137,7 @@ class AKS(Base):
service_principal_profile=service_principal_profile,
agent_pool_profiles=[agentpool_default],
linux_profile=linux_profile,
- enable_rbac=True,
+ enable_rbac=self.enable_rbac,
tags={'createdAt': datetime.now(tz=timezone.utc).isoformat()},
)
|
Enable RBAC support for AKS;
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@ if 'upload' in sys.argv:
install_reqs = [
'matplotlib>=1.4.0',
'numpy>=1.9.1',
- 'pandas>=0.16.1',
+ 'pandas>=0.18.0',
'scipy>=0.14.0',
'seaborn>=0.6.0',
'statsmodels>=0.6.1',
|
Changed pandas version to be compatible with rolling change
|
py
|
diff --git a/unixccompiler.py b/unixccompiler.py
index <HASH>..<HASH> 100644
--- a/unixccompiler.py
+++ b/unixccompiler.py
@@ -20,6 +20,7 @@ __revision__ = "$Id$"
import string, re, os
from types import *
from copy import copy
+from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
@@ -249,7 +250,23 @@ class UnixCCompiler (CCompiler):
return "-L" + dir
def runtime_library_dir_option (self, dir):
- return "-R" + dir
+ # XXX Hackish, at the very least. See Python bug #445902:
+ # http://sourceforge.net/tracker/index.php
+ # ?func=detail&aid=445902&group_id=5470&atid=105470
+ # Linkers on different platforms need different options to
+ # specify that directories need to be added to the list of
+ # directories searched for dependencies when a dynamic library
+ # is sought. GCC has to be told to pass the -R option through
+ # to the linker, whereas other compilers just know this.
+ # Other compilers may need something slightly different. At
+ # this time, there's no way to determine this information from
+ # the configuration data stored in the Python installation, so
+ # we use this hack.
+ compiler = os.path.basename(sysconfig.get_config_var("CC"))
+ if compiler == "gcc" or compiler == "g++":
+ return "-Wl,-R" + dir
+ else:
+ return "-R" + dir
def library_option (self, lib):
return "-l" + lib
|
When using GCC, use the right option to add a directory to the list of dirs searched for a dependency for runtime linking. This closes SF bug #<I>.
|
py
|
diff --git a/TwitterSearch/TwitterSearch.py b/TwitterSearch/TwitterSearch.py
index <HASH>..<HASH> 100644
--- a/TwitterSearch/TwitterSearch.py
+++ b/TwitterSearch/TwitterSearch.py
@@ -53,6 +53,7 @@ class TwitterSearch(object):
# init internal variables
self.__response = {}
self.__nextMaxID = maxint
+ self._nextTweet = 0
if "proxy" in attr:
self.setProxy(attr["proxy"])
|
move _nextTweet initialization to constructor
|
py
|
diff --git a/example/ssd/demo.py b/example/ssd/demo.py
index <HASH>..<HASH> 100644
--- a/example/ssd/demo.py
+++ b/example/ssd/demo.py
@@ -91,7 +91,7 @@ if __name__ == '__main__':
detector = get_detector(args.network, args.prefix, args.epoch,
args.data_shape,
- (args.mean_r, args.mean_g, args.mean_b),
+ mx.nd.array((args.mean_r, args.mean_g, args.mean_b)).reshape((3,1,1)),
ctx, args.nms_thresh, args.force_nms)
# run detection
detector.detect_and_visualize(image_list, args.dir, args.extension,
|
Fixed reshaping of input, was erroring out (#<I>)
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@ setup(
"cached_property",
"dataclasses;python_version<'3.7'",
"docopt",
- "lazy_object_property",
+ "lazy_object_proxy",
"pendulum",
"pybooru",
"pygments",
|
Fix wrong dependency name in setup.py
|
py
|
diff --git a/discord/voice_client.py b/discord/voice_client.py
index <HASH>..<HASH> 100644
--- a/discord/voice_client.py
+++ b/discord/voice_client.py
@@ -62,6 +62,7 @@ from .errors import ClientException, InvalidArgument
class StreamPlayer(threading.Thread):
def __init__(self, stream, encoder, connected, player, after, **kwargs):
threading.Thread.__init__(self, **kwargs)
+ self.daemon = True
self.buff = stream
self.frame_size = encoder.frame_size
self.player = player
|
Make Player threads into daemon threads.
|
py
|
diff --git a/pghoard/encryptor.py b/pghoard/encryptor.py
index <HASH>..<HASH> 100644
--- a/pghoard/encryptor.py
+++ b/pghoard/encryptor.py
@@ -25,6 +25,8 @@ class EncryptorError(Exception):
class Encryptor(object):
def __init__(self, rsa_public_key_pem):
+ if isinstance(rsa_public_key_pem, str):
+ rsa_public_key_pem = rsa_public_key_pem.encode("utf-8")
self.rsa_public_key = serialization.load_pem_public_key(rsa_public_key_pem, backend=default_backend())
self.cipher = None
self.authenticator = None
@@ -60,6 +62,8 @@ class Encryptor(object):
class Decryptor(object):
def __init__(self, rsa_private_key_pem):
+ if isinstance(rsa_private_key_pem, str):
+ rsa_private_key_pem = rsa_private_key_pem.encode("utf-8")
self.rsa_private_key = serialization.load_pem_private_key(rsa_private_key_pem, password=None, backend=default_backend())
self.cipher = None
self.authenticator = None
|
encryptor: accept key data in both str and bytes
|
py
|
diff --git a/djangocms_installer/django/__init__.py b/djangocms_installer/django/__init__.py
index <HASH>..<HASH> 100644
--- a/djangocms_installer/django/__init__.py
+++ b/djangocms_installer/django/__init__.py
@@ -227,8 +227,8 @@ def load_starting_page(config_data):
Load starting page into the CMS
"""
with chdir(config_data.project_directory):
- subprocess.check_call(
- "DJANGO_SETTINGS_MODULE=%s.settings python starting_page.py" %
- config_data.project_name, shell=True)
- subprocess.check_call(['rm', '-f', 'starting_page.py',
- 'starting_page.pyc', 'starting_page.json'])
+ os.environ['DJANGO_SETTINGS_MODULE'] = '{0}.settings'.format(config_data.project_name)
+ subprocess.check_call(["python", "starting_page.py"], shell=True)
+ os.remove('starting_page.py')
+ os.remove('starting_page.pyc')
+ os.remove('starting_page.json')
|
Made load_starting_page() OS agnostic. The previous implementation would crash on Windows due to the fact that 'DJANGO_SETTINGS_MODULE' is not a valid command.
|
py
|
diff --git a/thingstance/thing.py b/thingstance/thing.py
index <HASH>..<HASH> 100644
--- a/thingstance/thing.py
+++ b/thingstance/thing.py
@@ -1,10 +1,4 @@
-from hashlib import sha1
-
-
-def hash(data):
- head = str("blob " + str(len(data)) + "\0").encode("utf-8")
- digest = sha1(head + data)
- return digest.hexdigest()
+from .datatypes.digest import hash
class Thing(object):
@@ -14,5 +8,5 @@ class Thing(object):
@property
def hash(self):
- """A digest of the Thing; the git hash-object value of the json."""
+ """The git hash-object value of the json."""
return hash(self.json.encode("utf-8"))
|
Moved calculating hash to digest module
|
py
|
diff --git a/alertaclient/commands/cmd_send.py b/alertaclient/commands/cmd_send.py
index <HASH>..<HASH> 100644
--- a/alertaclient/commands/cmd_send.py
+++ b/alertaclient/commands/cmd_send.py
@@ -68,24 +68,7 @@ def cli(obj, resource, event, environment, severity, correlate, service, group,
except Exception as e:
click.echo('ERROR: JSON parse failure - {}'.format(e))
sys.exit(1)
- send_alert(
- resource=payload['resource'],
- event=payload['event'],
- environment=payload['environment'],
- severity=payload['severity'],
- correlate=payload['correlate'],
- service=payload['service'],
- group=payload['group'],
- value=payload['value'],
- text=payload['text'],
- tags=payload['tags'],
- attributes=payload['attributes'],
- origin=payload['origin'],
- type=payload['type'],
- timeout=payload['timeout'],
- raw_data=payload['rawData'],
- customer=payload['customer']
- )
+ send_alert(**payload)
sys.exit(0)
# read raw data from file or stdin
|
Simplify send API call for piping json input (#<I>)
|
py
|
diff --git a/flask_peewee/admin.py b/flask_peewee/admin.py
index <HASH>..<HASH> 100644
--- a/flask_peewee/admin.py
+++ b/flask_peewee/admin.py
@@ -214,11 +214,12 @@ class ModelAdmin(object):
def add(self):
Form = self.get_add_form()
+ instance = self.model()
if request.method == 'POST':
form = Form(request.form)
if form.validate():
- instance = self.save_model(self.model(), form, True)
+ instance = self.save_model(instance, form, True)
flash('New %s saved successfully' % self.get_display_name(), 'success')
return self.dispatch_save_redirect(instance)
else:
@@ -227,6 +228,7 @@ class ModelAdmin(object):
return render_template(self.templates['add'],
model_admin=self,
form=form,
+ instance=instance,
**self.get_extra_context()
)
|
Add instance to the context of add view
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@ if sys.version_info <= (3,6):
setup(
name="moneywagon",
- version='1.14.3',
+ version='1.14.4',
description='Next Generation Cryptocurrency Platform',
long_description=open('README.md').read(),
author='Chris Priest',
|
Made release <I>
|
py
|
diff --git a/hooks/post_gen_project.py b/hooks/post_gen_project.py
index <HASH>..<HASH> 100644
--- a/hooks/post_gen_project.py
+++ b/hooks/post_gen_project.py
@@ -25,11 +25,6 @@ except NotImplementedError:
PROJECT_DIR_PATH = os.path.realpath(os.path.curdir)
-def remove_file(file_path):
- if os.path.exists(file_path):
- os.remove(file_path)
-
-
def remove_open_source_project_only_files():
file_names = [
'CONTRIBUTORS.txt',
@@ -75,11 +70,11 @@ def remove_heroku_files():
'requirements.txt',
]
for file_name in file_names:
- remove_file(os.path.join(PROJECT_DIR_PATH, file_name))
+ os.remove(os.path.join(PROJECT_DIR_PATH, file_name))
def remove_dotenv_file():
- remove_file(os.path.join(PROJECT_DIR_PATH, '.env'))
+ os.remove(os.path.join(PROJECT_DIR_PATH, '.env'))
def remove_grunt_files():
|
Get rid of remove_file() from post hook
|
py
|
diff --git a/git/cmd.py b/git/cmd.py
index <HASH>..<HASH> 100644
--- a/git/cmd.py
+++ b/git/cmd.py
@@ -92,11 +92,16 @@ def handle_process_output(process, stdout_handler, stderr_handler, finalizer, de
cmdline = getattr(process, 'args', '') # PY3+ only
if not isinstance(cmdline, (tuple, list)):
cmdline = cmdline.split()
+
+ pumps = []
+ if process.stdout:
+ pumps.append(('stdout', process.stdout, stdout_handler))
+ if process.stderr:
+ pumps.append(('stderr', process.stderr, stderr_handler))
+
threads = []
- for name, stream, handler in (
- ('stdout', process.stdout, stdout_handler),
- ('stderr', process.stderr, stderr_handler),
- ):
+
+ for name, stream, handler in pumps:
t = threading.Thread(target=pump_stream,
args=(cmdline, name, stream, decode_streams, handler))
t.setDaemon(True)
|
pumps: FIX don't pump when proc has no streams
|
py
|
diff --git a/pysat/_files.py b/pysat/_files.py
index <HASH>..<HASH> 100644
--- a/pysat/_files.py
+++ b/pysat/_files.py
@@ -172,17 +172,13 @@ class Files(object):
else:
return data
- def refresh(self, store=False):
+ def refresh(self):
"""Refresh loaded instrument filelist by searching filesystem.
Searches pysat provided path, pysat_data_dir/platform/name/tag/,
where pysat_data_dir is set by pysat.utils.set_data_dir(path=path).
- # Parameters
- # ----------
- # store : boolean
- # set True to store loaded file names into .pysat directory
-
+
"""
info = self._sat._list_rtn(tag=self._sat.tag, data_path=self.data_path)
info = self._remove_data_dir_path(info)
|
removed store option from refresh, a call to _store always made now; logic for storage moved into _store.
|
py
|
diff --git a/salt/modules/file.py b/salt/modules/file.py
index <HASH>..<HASH> 100644
--- a/salt/modules/file.py
+++ b/salt/modules/file.py
@@ -3541,7 +3541,8 @@ def set_selinux_context(path,
.. code-block:: bash
- salt '*' file.set_selinux_context path <role> <type> <range>
+ salt '*' file.set_selinux_context path <user> <role> <type> <range>
+ salt '*' file.set_selinux_context /etc/yum.repos.d/epel-release system_u object_r system_conf_t s0
'''
if not any((user, role, type, range)):
return False
|
Added the <user> declaration for the SELinux context to the documentation. Added an example of the module actually being used.
|
py
|
diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py
index <HASH>..<HASH> 100644
--- a/salt/cloud/__init__.py
+++ b/salt/cloud/__init__.py
@@ -62,7 +62,7 @@ class CloudClient(object):
'''
Execute a cloud method in a multiprocess and fire the return on the event bus
'''
- salt.utils.daemonize()
+ salt.utils.daemonize(False)
event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
data = {'fun': 'cloud.{0}'.format(fun),
'jid': jid,
|
Don't redirect out when making sub-daemons
|
py
|
diff --git a/odl/space/cartesian.py b/odl/space/cartesian.py
index <HASH>..<HASH> 100644
--- a/odl/space/cartesian.py
+++ b/odl/space/cartesian.py
@@ -841,7 +841,12 @@ class Ntuples(Set):
>>> x
Ntuples(2, dtype('int8')).element([0, 1])
"""
- self.data[indices] = values
+ # TODO: do a real compatibility check
+ try:
+ return self.data.__setitem__(
+ indices, values.data.__getitem__(indices))
+ except AttributeError:
+ return self.data.__setitem__(indices, values)
def __eq__(self, other):
"""`vec.__eq__(other) <==> vec == other`."""
|
Fixed the slow `__setitem__` method, see issue #<I>
|
py
|
diff --git a/tests/unit/modules/test_ssh.py b/tests/unit/modules/test_ssh.py
index <HASH>..<HASH> 100644
--- a/tests/unit/modules/test_ssh.py
+++ b/tests/unit/modules/test_ssh.py
@@ -92,7 +92,7 @@ class SSHAuthKeyTestCase(TestCase, LoaderModuleMockMixin):
empty_line = '\n'
comment_line = '# this is a comment \n'
# Write out the authorized key to a temporary file
- temp_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
# Add comment
temp_file.write(comment_line)
# Add empty line for #41335
|
Fix unit tests for ssh
|
py
|
diff --git a/metpy/plots/skewt.py b/metpy/plots/skewt.py
index <HASH>..<HASH> 100644
--- a/metpy/plots/skewt.py
+++ b/metpy/plots/skewt.py
@@ -154,6 +154,16 @@ class SkewT(object):
self.ax.xaxis.set_major_locator(MultipleLocator(10))
self.ax.set_xlim(-50, 50)
+ def plot_barbs(self, p, u, v, xloc=1.0, **kwargs):
+ # Assemble array of x-locations in axes space
+ x = np.empty_like(p)
+ x.fill(xloc)
+
+ # Do barbs plot at this location
+ self.ax.barbs(x, p, u, v,
+ transform=self.ax.get_yaxis_transform(which='tick2'),
+ clip_on=False, **kwargs)
+
def plot_dry_adiabats(self, T0=None, P=None, **kwargs):
# Determine set of starting temps if necessary
if T0 is None:
|
Add plotting of wind barbs to skewT plot.
|
py
|
diff --git a/uncommitted/__init__.py b/uncommitted/__init__.py
index <HASH>..<HASH> 100644
--- a/uncommitted/__init__.py
+++ b/uncommitted/__init__.py
@@ -68,6 +68,12 @@ contribute additional detection and scanning routines.
Changelog
---------
+**2.3** (2020 Apr 9)
+
+- Bugfix: the regular expression that matches the name of a git
+ submodule would get confused if the submodule's directory name itself
+ had parens in it.
+
**2.2** (2020 Feb 4)
- Improved Windows compatibility, thanks to Fabio Leimgruber!
@@ -143,4 +149,4 @@ Changelog
.. _Eapen: http://eapen.in
"""
-__version__ = '2.2'
+__version__ = '2.3'
|
Get ready for release of <I>
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -82,7 +82,7 @@ class CythonClean(clean):
clean.run(self)
# Find list of files with .c extension
- flist, flist_path = read_files("cython/core", ".c")
+ flist, flist_path = read_files("cython_temp", ".c")
# Clean files with .c extensions
if flist_path:
|
Update cython temp directory in setup.py clean hook
|
py
|
diff --git a/indra/tools/assemble_corpus.py b/indra/tools/assemble_corpus.py
index <HASH>..<HASH> 100644
--- a/indra/tools/assemble_corpus.py
+++ b/indra/tools/assemble_corpus.py
@@ -237,7 +237,6 @@ def run_preassembly_related(preassembler, beliefengine, **kwargs):
stmts_out = preassembler.combine_related(return_toplevel=False,
poolsize=poolsize,
size_cutoff=size_cutoff)
- logger.debug("Time elapsed, combine_related: %s" % elapsed)
beliefengine.set_hierarchy_probs(stmts_out)
stmts_top = filter_top_level(stmts_out)
if return_toplevel:
|
Remove time logging in assemble corpus preassembly
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,6 @@ setup(name='dingo',
'geoalchemy2',
#'matplotlib', #should be included but fails via pip3
'oemof',
- 'ego.io',
+ 'ego.io >= 0.0.1-pre',
'oemof.db']
)
|
Update setup.py with exact version
|
py
|
diff --git a/faker/providers/date_time/__init__.py b/faker/providers/date_time/__init__.py
index <HASH>..<HASH> 100644
--- a/faker/providers/date_time/__init__.py
+++ b/faker/providers/date_time/__init__.py
@@ -1376,6 +1376,18 @@ class Provider(BaseProvider):
'continent': 'Europe',
'name': 'United Kingdom',
'capital': 'London'},
+ {'timezones': ['Asia/Taipei'],
+ 'alpha-2-code': 'TW',
+ 'alpha-3-code': 'TWN',
+ 'continent': 'Asia',
+ 'name': 'Taiwan',
+ 'capital': 'Taipei'},
+ {'timezones': ['Asia/Gaza', 'Asia/Hebron'],
+ 'alpha-2-code': 'PS',
+ 'alpha-3-code': 'PSE',
+ 'continent': 'Asia',
+ 'name': 'Palestine',
+ 'capital': 'Ramallah'},
]
regex = re.compile(timedelta_pattern)
|
Add Taiwanese and Palestinian time zones. (#<I>)
|
py
|
diff --git a/pypeerassets/networks.py b/pypeerassets/networks.py
index <HASH>..<HASH> 100644
--- a/pypeerassets/networks.py
+++ b/pypeerassets/networks.py
@@ -23,10 +23,10 @@ For abbreviation prefix testnet of the network with "t".
networks = (
# Peercoin mainnet
Network("peercoin", "ppc", b'37', b'b7', b'75', b'e6e8e9e5',
- b'\x17PPCoin Signed Message:\n', float(0.01), float(0.01), True, 1000000),
+ b'\x17PPCoin Signed Message:\n', 1000000, float(0.01), True, 1000000),
# Peercoin testnet
Network("peercoin-testnet", "tppc", b'6f', b'ef', b'c4', b'cbf2c0ef',
- b'\x17PPCoin Signed Message:\n', float(0.01), float(0.01), True, 1000000),
+ b'\x17PPCoin Signed Message:\n', 1000000, float(0.01), True, 1000000),
# Bitcoin mainnet
Network("bitcoin", "btc", b'00', b'80', b'05', b'd9b4bef9',
b'\x18Bitcoin Signed Message:\n', 0, 0, False, 100000000),
|
express in satoshis.
|
py
|
diff --git a/rope/refactor/patchedast.py b/rope/refactor/patchedast.py
index <HASH>..<HASH> 100644
--- a/rope/refactor/patchedast.py
+++ b/rope/refactor/patchedast.py
@@ -125,7 +125,10 @@ class _PatchingASTWalker(object):
# semicolon in except
region = self.source.consume_except_as_or_semicolon()
else:
- region = self.source.consume(child, skip_comment=not isinstance(node, (ast.JoinedStr, ast.FormattedValue)))
+ if hasattr(ast, 'JoinedStr') and isinstance(node, (ast.JoinedStr, ast.FormattedValue)):
+ region = self.source.consume_joined_string(child)
+ else:
+ region = self.source.consume(child)
child = self.source[region[0]:region[1]]
token_start = region[0]
if not first_token:
@@ -823,6 +826,11 @@ class _Source(object):
self.offset = new_offset + len(token)
return (new_offset, self.offset)
+ def consume_joined_string(self, token):
+ new_offset = self.source.index(token, self.offset)
+ self.offset = new_offset + len(token)
+ return (new_offset, self.offset)
+
def consume_string(self, end=None):
if _Source._string_pattern is None:
string_pattern = codeanalyze.get_string_pattern()
|
Refactor consume_joined_string and also fix missing ast.JoinedStr/FormattedValue in older python
|
py
|
diff --git a/bumpversion/__init__.py b/bumpversion/__init__.py
index <HASH>..<HASH> 100644
--- a/bumpversion/__init__.py
+++ b/bumpversion/__init__.py
@@ -92,7 +92,7 @@ class Git(object):
@classmethod
def add_path(cls, path):
- subprocess.check_call(["git", "add", path])
+ subprocess.check_call(["git", "add", "--update", path])
@classmethod
def commit(cls, message):
|
git: don't add files that aren't already tracked Fixes #<I>
|
py
|
diff --git a/warehouse/accounts/services.py b/warehouse/accounts/services.py
index <HASH>..<HASH> 100644
--- a/warehouse/accounts/services.py
+++ b/warehouse/accounts/services.py
@@ -303,6 +303,11 @@ class DatabaseUserService:
"warehouse.authentication.two_factor.failure",
tags=tags + ["failure_reason:no_totp"],
)
+ # If we've gotten here, then we'll want to record a failed attempt in our
+ # rate limiting before returning False to indicate a failed totp
+ # verification.
+ self.ratelimiters["user"].hit(user_id)
+ self.ratelimiters["global"].hit()
return False
valid = otp.verify_totp(totp_secret, totp_value)
@@ -314,6 +319,11 @@ class DatabaseUserService:
"warehouse.authentication.two_factor.failure",
tags=tags + ["failure_reason:invalid_totp"],
)
+ # If we've gotten here, then we'll want to record a failed attempt in our
+ # rate limiting before returning False to indicate a failed totp
+ # verification.
+ self.ratelimiters["user"].hit(user_id)
+ self.ratelimiters["global"].hit()
return valid
|
actually hit ratelimiter on failed TOTP attempts (#<I>) follows up on #<I>, making it work as designed
|
py
|
diff --git a/vyper/parser/expr.py b/vyper/parser/expr.py
index <HASH>..<HASH> 100644
--- a/vyper/parser/expr.py
+++ b/vyper/parser/expr.py
@@ -175,8 +175,19 @@ class Expr(object):
const = self.context.constants[self.expr.id]
expr = Expr.parse_value_expr(const.value, self.context)
annotation_type = parse_type(const.annotation.args[0], None, custom_units=self.context.custom_units)
+
+ fail = False
+
if expr.typ != annotation_type:
+ fail = True
+ # special case for literals, which can be uint256 types as well.
+ if isinstance(annotation_type, BaseType) and annotation_type.typ == 'uint256' and \
+ expr.typ.typ == 'int128' and SizeLimits.in_bounds('uint256', expr.value):
+ fail = False
+
+ if fail:
raise StructureException('Invalid value for constant type, expected %r' % annotation_type, const.value)
+
else:
expr.typ = annotation_type
return expr
|
Fix constants check for integer based literals.
|
py
|
diff --git a/pyethereum/processblock.py b/pyethereum/processblock.py
index <HASH>..<HASH> 100644
--- a/pyethereum/processblock.py
+++ b/pyethereum/processblock.py
@@ -684,7 +684,7 @@ def apply_op(block, tx, msg, processed_code, compustate):
elif op[:3] == 'LOG':
depth = int(op[3:])
mstart, msz = stk.pop(), stk.pop()
- topics = [stk.pop() for x in range(depth)]
+ topics = list(set(stk.pop() for x in range(depth)))
compustate.gas -= msz
if not mem_extend(mem, compustate, op, mstart, msz):
return vm_exception('OOG EXTENDING MEMORY')
|
Log.topics as a set
|
py
|
diff --git a/nipap-cli/nipap_cli/nipap_cli.py b/nipap-cli/nipap_cli/nipap_cli.py
index <HASH>..<HASH> 100755
--- a/nipap-cli/nipap_cli/nipap_cli.py
+++ b/nipap-cli/nipap_cli/nipap_cli.py
@@ -530,7 +530,10 @@ def remove_prefix(arg, opts):
recursive = True
spec = { 'prefix': arg }
- v = get_vrf(opts.get('vrf'), abort=True)
+ if opts.get('vrf') is None:
+ v = get_vrf('none', abort=True)
+ else:
+ v = get_vrf(opts.get('vrf'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
|
Default remove_prefix to VRF none It is possible to add prefixes without specifying a VRF, in which case it defaults to the default VRF. On the basis that it should be easy to start using NIPAP we thus default remove_prefix to operating on VRF none as well. Users don't really have to know about VRFs to start using NIPAP now :) Fixes #<I>
|
py
|
diff --git a/better_exchook.py b/better_exchook.py
index <HASH>..<HASH> 100644
--- a/better_exchook.py
+++ b/better_exchook.py
@@ -98,6 +98,11 @@ def parse_py_statement(line):
if state == 3: yield ("id", curtoken)
elif state == 6: yield ("comment", curtoken)
+def parse_py_statements(source_code):
+ for line in source_code.splitlines():
+ for t in parse_py_statement(line):
+ yield t
+
import keyword
pykeywords = set(keyword.kwlist) | set(["None", "True", "False"])
@@ -241,7 +246,7 @@ def is_source_code_missing_open_brackets(source_code):
counters = [0] * len(open_brackets)
# Go in reverse order through the tokens.
# Thus, we first should see the closing brackets, and then the matching opening brackets.
- for t_type, t_content in reversed(list(parse_py_statement(source_code))):
+ for t_type, t_content in reversed(list(parse_py_statements(source_code))):
if t_type != "op": continue # we are from now on only interested in ops (including brackets)
if t_content in open_brackets:
idx = open_brackets.index(t_content)
|
Small fix for comments in a multi-line statement
|
py
|
diff --git a/tensorflow_probability/python/layers/distribution_layer.py b/tensorflow_probability/python/layers/distribution_layer.py
index <HASH>..<HASH> 100644
--- a/tensorflow_probability/python/layers/distribution_layer.py
+++ b/tensorflow_probability/python/layers/distribution_layer.py
@@ -47,7 +47,7 @@ from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.distributions import variational_gaussian_process as variational_gaussian_process_lib
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.layers.internal import distribution_tensor_coercible as dtc
-from tensorflow_probability.python.layers.internal import tensor_tuple as tensor_tuple
+from tensorflow_probability.python.layers.internal import tensor_tuple
from tensorflow.python.keras.utils import tf_utils as keras_tf_utils # pylint: disable=g-direct-tensorflow-import
|
Eliminate no-op import alias '...tensor_tuple as tensor_tuple' PiperOrigin-RevId: <I>
|
py
|
diff --git a/MAVProxy/modules/mavproxy_kmlread.py b/MAVProxy/modules/mavproxy_kmlread.py
index <HASH>..<HASH> 100644
--- a/MAVProxy/modules/mavproxy_kmlread.py
+++ b/MAVProxy/modules/mavproxy_kmlread.py
@@ -149,19 +149,19 @@ class KmlReadModule(mp_module.MPModule):
self.mpstate.map.remove_object(layer)
self.curlayers.remove(layername)
if layername in self.curtextlayers:
- for layer in self.curtextlayers:
- if layer == layername:
- self.mpstate.map.remove_object(layer)
- self.curtextlayers.remove(layer)
+ for clayer in self.curtextlayers:
+ if clayer == layername:
+ self.mpstate.map.remove_object(clayer)
+ self.curtextlayers.remove(clayer)
#toggle layer on (plus associated text element)
else:
for layer in self.allayers:
if layer.key == layername:
self.mpstate.map.add_object(layer)
self.curlayers.append(layername)
- for layer in self.alltextlayers:
- if layer.key == layername:
- self.mpstate.map.add_object(layer)
+ for alayer in self.alltextlayers:
+ if alayer.key == layername:
+ self.mpstate.map.add_object(alayer)
self.curtextlayers.append(layername)
self.menu_needs_refreshing = True
|
kmlread: rename inner loop variables that reused the `layer` name already bound by an outer for loop
|
py
|
diff --git a/py509/x509.py b/py509/x509.py
index <HASH>..<HASH> 100644
--- a/py509/x509.py
+++ b/py509/x509.py
@@ -78,3 +78,17 @@ def make_certificate_authority(**name):
csr = make_certificate_signing_request(key, **name)
crt = make_certificate(csr, key, csr, make_serial(), 0, 10 * 365 * 24 * 60 * 60, exts=[crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')])
return key, crt
+
+
+def load_x509_certificates(buf):
+ """Load one or multiple X.509 certificates from a buffer.
+
+ :param buf: A buffer is an instance of `basestring` and can contain multiple
+ certificates.
+
+ """
+ if not isinstance(buf, basestring):
+ raise ValueError('`buf` should be an instance of `basestring` not `%s`' % type(buf))
+
+ for pem in re.findall('(-----BEGIN CERTIFICATE-----\s(\S+\n*)+\s-----END CERTIFICATE-----\s)', buf):
+ yield crypto.load_certificate(crypto.FILETYPE_PEM, pem[0])
|
Add a method to load multiple certificates. Useful, e.g., when loading a ca-bundle.crt file.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100755
--- a/setup.py
+++ b/setup.py
@@ -27,11 +27,16 @@ opj = os.path.join
cythonize_dir = "build"
+macros = []
+
+if sys.platform == 'cygwin':
+ macros.append(('FD_SETSIZE', 512))
kwds = dict(include_dirs=[opj("src", "cysignals"),
opj(cythonize_dir, "src"),
opj(cythonize_dir, "src", "cysignals")],
- depends=glob(opj("src", "cysignals", "*.h")))
+ depends=glob(opj("src", "cysignals", "*.h")),
+ define_macros=macros)
extensions = [
Extension("cysignals.signals", ["src/cysignals/signals.pyx"], **kwds),
|
Increase FD_SETSIZE from its default of <I> on Cygwin
|
py
|
diff --git a/ppb/features/twophase.py b/ppb/features/twophase.py
index <HASH>..<HASH> 100644
--- a/ppb/features/twophase.py
+++ b/ppb/features/twophase.py
@@ -41,7 +41,7 @@ class TwoPhaseMixin(EventMixin):
self.__staged_changes = {}
self.__staged_changes.update(kwargs)
- def do_commit(self, event, signal):
+ def on_commit(self, event, signal):
"""
Commit changes previously staged.
"""
|
twophase: Typo: That was supposed to be an event handler
|
py
|
diff --git a/gwpy/plotter/html.py b/gwpy/plotter/html.py
index <HASH>..<HASH> 100644
--- a/gwpy/plotter/html.py
+++ b/gwpy/plotter/html.py
@@ -25,6 +25,7 @@ import numpy
from matplotlib.collections import Collection
from matplotlib.figure import Figure
+from matplotlib.lines import Line2D
from ..types import Series
@@ -146,8 +147,10 @@ def map_artist(artist, filename, mapname='points', shape='circle',
z = artist.get_array()
if z is not None:
data = numpy.vstack((data[:, 0], data[:, 1], z)).T
+ elif isinstance(artist, Line2D):
+ data = numpy.asarray(artist.get_data()).T
else:
- data = artist.get_data()
+ data = numpy.asarray(artist.get_data())
return _map(data, axes, filename, mapname=mapname, shape=shape,
popup=popup, title=title, standalone=standalone, jquery=jquery)
|
plotter.html.map_artist: fixed bug in mapping lines
|
py
|
diff --git a/doc/conf.py b/doc/conf.py
index <HASH>..<HASH> 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -333,9 +333,9 @@ texinfo_documents = [
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
- 'matplotlib': ('https://matplotlib.org/', None),
+ 'matplotlib': ('https://matplotlib.org/stable', None),
'numpy': ('https://numpy.org/doc/stable', None),
- 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy', None),
}
|
MAINT: Correct links for intersphinx crossrefs
|
py
|
diff --git a/pliers/converters/base.py b/pliers/converters/base.py
index <HASH>..<HASH> 100644
--- a/pliers/converters/base.py
+++ b/pliers/converters/base.py
@@ -55,7 +55,8 @@ def get_converter(in_type, out_type, *args, **kwargs):
if not issubclass(cls, Converter):
continue
concrete = len(cls.__abstractmethods__) == 0
- if cls._input_type == in_type and cls._output_type == out_type and concrete:
+ if cls._input_type == in_type and cls._output_type == out_type and \
+ concrete and cls.available:
try:
conv = cls(*args, **kwargs)
return conv
|
in get_converters, make sure converter is environment-ready
|
py
|
diff --git a/tests/system/providers/amazon/aws/example_rds_export.py b/tests/system/providers/amazon/aws/example_rds_export.py
index <HASH>..<HASH> 100644
--- a/tests/system/providers/amazon/aws/example_rds_export.py
+++ b/tests/system/providers/amazon/aws/example_rds_export.py
@@ -126,6 +126,8 @@ with DAG(
s3_prefix='rds-test',
iam_role_arn=test_context[ROLE_ARN_KEY],
kms_key_id=test_context[KMS_KEY_ID_KEY],
+ # Waits by default, set False to test the CancelExportTaskOperator below
+ wait_for_completion=False,
)
# [END howto_operator_rds_start_export_task]
|
Adds a wait to prevent a race condition (#<I>)
|
py
|
diff --git a/insteonplm/devices/dimmableLightingControl.py b/insteonplm/devices/dimmableLightingControl.py
index <HASH>..<HASH> 100644
--- a/insteonplm/devices/dimmableLightingControl.py
+++ b/insteonplm/devices/dimmableLightingControl.py
@@ -219,7 +219,13 @@ class DimmableLightingControl_2475F(DimmableLightingControl):
self.log.debug('Ending DimmableLightingControl_2475F._fan_status_update_received')
def _light_on_command_received(self, msg):
- light_status_request()
+ device1 = self._plm.devices[self._get_device_id(0x01)]
+ device2 = self._plm.devices[self._get_device_id(0x02)]
+ device1.light_status_request()
+ device2.ligth_status_request()
def _light_off_command_received(self, msg):
- light_status_request()
\ No newline at end of file
+ device1 = self._plm.devices[self._get_device_id(0x01)]
+ device2 = self._plm.devices[self._get_device_id(0x02)]
+ device1.light_status_request()
+ device2.ligth_status_request()
\ No newline at end of file
|
Fixed issues with manual changes to state for FanLinc
|
py
|
diff --git a/buildbot/status/web/build.py b/buildbot/status/web/build.py
index <HASH>..<HASH> 100644
--- a/buildbot/status/web/build.py
+++ b/buildbot/status/web/build.py
@@ -107,17 +107,22 @@ class StatusResourceBuild(HtmlResource):
data += "<ol>\n"
for s in b.getSteps():
name = s.getName()
+ time_to_run = 0
+ (start, end) = s.getTimes()
+ if start and end:
+ time_to_run = end - start
if s.isFinished():
css_class = css_classes[s.getResults()[0]]
elif s.isStarted():
css_class = "running"
else:
css_class = ""
- data += (' <li><span class="%s"><a href=\"%s\">%s</a> [%s]</span>\n'
+ data += (' <li><span class="%s"><a href=\"%s\">%s</a> [%s] [%d seconds]</span>\n'
% (css_class,
req.childLink("steps/%s" % urllib.quote(name)),
name,
- " ".join(s.getText())))
+ " ".join(s.getText()),
+ time_to_run))
if s.getLogs():
data += " <ol>\n"
for logfile in s.getLogs():
|
(fixes #<I>) chromium patch to show step times
|
py
|
diff --git a/vext/install/__init__.py b/vext/install/__init__.py
index <HASH>..<HASH> 100644
--- a/vext/install/__init__.py
+++ b/vext/install/__init__.py
@@ -18,7 +18,7 @@ DEFAULT_PTH_CONTENT = """\
#
# Lines beginning with 'import' are executed, so import sys to get
# going.
-import os; import sys; exec("try:\n from vext.gatekeeper import install_importer;install_importer()\nexcept:sys.stderr.write('An error occured while enabling VEXT'); raise;")
+import os; from vext.gatekeeper import install_importer; install_importer()
"""
|
Another attempt at updating the pth
|
py
|
diff --git a/tests/test_recipes.py b/tests/test_recipes.py
index <HASH>..<HASH> 100644
--- a/tests/test_recipes.py
+++ b/tests/test_recipes.py
@@ -630,7 +630,7 @@ class NthPermutationTests(TestCase):
for index in [-1 - n, n + 1]:
with self.assertRaises(IndexError):
mi.nth_combination(iterable, r, index)
-
+
def test_invalid_r(self):
iterable = 'abcde'
r = 4
|
removed extra whitespace on <I>
|
py
|
diff --git a/tests/test_betfairstream.py b/tests/test_betfairstream.py
index <HASH>..<HASH> 100644
--- a/tests/test_betfairstream.py
+++ b/tests/test_betfairstream.py
@@ -46,7 +46,7 @@ class BetfairStreamTest(unittest.TestCase):
mock_read_loop.assert_called_with()
self.betfair_stream.start(async=True)
- mock_threading.Thread.assert_called_with(daemon=False, name=self.description, target=mock_read_loop)
+ mock_threading.Thread.assert_called_with(name=self.description, target=mock_read_loop)
self.betfair_stream._running = False
self.betfair_stream.start(async=False)
|
Fix test - Thread not called with daemon kwarg Following python <I> compatibility change in c<I>d<I>a3d8c<I>aeccb9db5c7eb<I>d<I>f<I>b the creation of Thread is no longer called with the daemon kwarg so the daemon kwarg has been removed from assert_called_with arguments as it is no longer true.
|
py
|
diff --git a/salt/modules/state.py b/salt/modules/state.py
index <HASH>..<HASH> 100644
--- a/salt/modules/state.py
+++ b/salt/modules/state.py
@@ -301,8 +301,7 @@ def apply_(mods=None,
<salt.modules.state.sls>` based on the arguments passed to this function.
It exists as a more intuitive way of applying states.
-
- **APPLYING ALL STATES CONFIGURED IN TOP.SLS**
+ .. rubric:: APPLYING ALL STATES CONFIGURED IN TOP.SLS (A.K.A. :ref:`HIGHSTATE <running-highstate>`)
To apply all configured states, simply run ``state.apply``:
@@ -346,7 +345,7 @@ def apply_(mods=None,
salt '*' state.apply localconfig=/path/to/minion.yml
- **APPLYING INDIVIDUAL SLS FILES**
+ .. rubric:: APPLYING INDIVIDUAL SLS FILES (A.K.A. :py:func:`STATE.SLS <salt.modules.state.sls>`)
To apply individual SLS files, pass them as a comma-separated list:
|
Add reference to state tutorial to state.apply docstring
|
py
|
diff --git a/telebot/types.py b/telebot/types.py
index <HASH>..<HASH> 100644
--- a/telebot/types.py
+++ b/telebot/types.py
@@ -762,7 +762,7 @@ class CallbackQuery(JsonDeserializable):
message = Message.de_json(obj['message'])
inline_message_id = obj.get('inline_message_id')
chat_instance = obj['chat_instance']
- data = obj['data']
+ data = obj.get('data')
game_short_name = obj.get('game_short_name')
return cls(id, from_user, data, chat_instance, message, inline_message_id, game_short_name)
|
Fixed KeyError when data field is None in CallbackQuery obj['data'] raises KeyError when `data` is None, while obj.get('data') returns None
|
py
|
diff --git a/master/buildbot/test/fake/openstack.py b/master/buildbot/test/fake/openstack.py
index <HASH>..<HASH> 100644
--- a/master/buildbot/test/fake/openstack.py
+++ b/master/buildbot/test/fake/openstack.py
@@ -140,5 +140,5 @@ class Instance():
# Parts used from novaclient.exceptions.
-class NotFound():
+class NotFound(Exception):
pass
|
NotFound must inherit from Exception for Python 3. This fixes the Python 3 error: "exceptions must derive from BaseException"
|
py
|
diff --git a/zipline/data/hdf5_daily_bars.py b/zipline/data/hdf5_daily_bars.py
index <HASH>..<HASH> 100644
--- a/zipline/data/hdf5_daily_bars.py
+++ b/zipline/data/hdf5_daily_bars.py
@@ -303,10 +303,14 @@ class HDF5DailyBarWriter(object):
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
- ohlcv_frame = pd.concat([df for sid, df in data])
+ sids, frames = zip(*data)
+ ohlcv_frame = pd.concat(frames)
+
+ # Repeat each sid for each row in its corresponding frame.
+ sid_ix = np.repeat(sids, [len(f) for f in frames])
# Add id to the index, so the frame is indexed by (date, id).
- ohlcv_frame.set_index('id', append=True, inplace=True)
+ ohlcv_frame.set_index(sid_ix, append=True, inplace=True)
frames = {
field: ohlcv_frame[field].unstack()
|
MAINT: Don't require an 'id' column in hdf5 daily bar writer. It's redundant since we're taking a (sid, frame) pair, and not all test fixtures set this column.
|
py
|
diff --git a/setup.py b/setup.py
index <HASH>..<HASH> 100644
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@ setup(
include_package_data = True,
# Package dependencies.
- install_requires = ['setuptools', 'simplejson'],
+ install_requires = ['simplejson'],
# Metadata for PyPI.
author = 'Ryan McGrath',
|
Don't need to include `setuptools`, heh.
|
py
|
diff --git a/c7n/resources/glacier.py b/c7n/resources/glacier.py
index <HASH>..<HASH> 100644
--- a/c7n/resources/glacier.py
+++ b/c7n/resources/glacier.py
@@ -34,6 +34,23 @@ class Glacier(QueryResourceManager):
@Glacier.filter_registry.register('cross-account')
class GlacierCrossAccountAccessFilter(CrossAccountAccessFilter):
+ """Filter to return all glacier vaults with cross account access permissions
+
+ The whitelist parameter will omit the accounts that match from the return
+
+ :example:
+
+ .. code-block:
+
+ policies:
+ - name: glacier-cross-account
+ resource: glacier
+ filters:
+ - type: cross-account
+ whitelist:
+ - permitted-account-01
+ - permitted-account-02
+ """
def process(self, resources, event=None):
def _augment(r):
|
adding docstrings to glacier.py (#<I>) * adding docstrings to glacier.py
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.