diff: string, lengths 139 to 3.65k
message: string, lengths 8 to 627
diff_languages: string, 1 class
diff --git a/wal_e/retries.py b/wal_e/retries.py index <HASH>..<HASH> 100644 --- a/wal_e/retries.py +++ b/wal_e/retries.py @@ -3,6 +3,8 @@ import logging import sys import traceback +import gevent + import wal_e.log_help as log_help logger = log_help.WalELogger(__name__, level=logging.INFO) @@ -54,6 +56,9 @@ def retry(exception_processor=generic_exception_processor): exc_processor_cxt = None while True: + # Avoid livelocks while spinning on retry by yielding. + gevent.sleep(0) + try: return f(*args, **kwargs) except:
retries: avoid accidental livelocks. Putting in an extra yield doesn't seem like it could hurt, but the alternative sure could. This was a defect of the pre-refactored version, too.
py
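Aside: a minimal sketch of the yield-before-retry pattern in the wal-e diff above, assuming gevent is installed; the bare decorator and unbounded loop are illustrative, not wal-e's actual retry logic.

    import functools

    import gevent

    def retry(f):
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            while True:
                # Yield to the gevent hub so a hot retry loop cannot
                # starve other greenlets (the livelock the commit avoids).
                gevent.sleep(0)
                try:
                    return f(*args, **kwargs)
                except Exception:
                    continue  # real code would log and bound the retries
        return wrapped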
diff --git a/goless/backends.py b/goless/backends.py index <HASH>..<HASH> 100644 --- a/goless/backends.py +++ b/goless/backends.py @@ -1,5 +1,6 @@ import os + class Backend(object): def start(self, func, *args, **kwargs): """Starts a tasklet/greenlet.""" @@ -90,16 +91,18 @@ def _make_gevent(): return GeventBackend() _backends = { - "stackless": _make_stackless, - "gevent": _make_gevent + "stackless": _make_stackless, + "gevent": _make_gevent } current = None -GOLESS_BACKEND = os.getenv("GOLESS_BACKEND", None) -if GOLESS_BACKEND is not None: +GOLESS_BACKEND = os.getenv("GOLESS_BACKEND", '') +if GOLESS_BACKEND: if GOLESS_BACKEND not in _backends: - raise RuntimeError("Invalid backend specified. Valid backends are: %s" % _backends.keys()) + raise RuntimeError( + "Invalid backend %r specified. Valid backends are: %s" + % (GOLESS_BACKEND, _backends.keys())) current = _backends[GOLESS_BACKEND]() else: try:
GOLESS_BACKEND no longer compares against None, uses empty string. Minor formal cleanup.
py
diff --git a/tests/end_to_end/test_validation_end_to_end.py b/tests/end_to_end/test_validation_end_to_end.py index <HASH>..<HASH> 100644 --- a/tests/end_to_end/test_validation_end_to_end.py +++ b/tests/end_to_end/test_validation_end_to_end.py @@ -93,13 +93,13 @@ def test_end_to_end(tempdir): p = 15 q = 1 X, y, coef = make_regression( - n_samples=50, n_features=p, n_informative=q, coef=True, + n_samples=500, n_features=p, n_informative=q, coef=True, shuffle=True, random_state=1) # informative columns are 'A', 'B' # uninformative columns are 'Z_0', ..., 'Z_11' columns = [] - informative = list('DCBA') + informative = list('A') other = ['Z_{i}'.format(i=i) for i in reversed(range(p-q))] for i in range(p): if coef[i] == 0:
address some issues, up the sample size
py
diff --git a/sos/archive.py b/sos/archive.py index <HASH>..<HASH> 100644 --- a/sos/archive.py +++ b/sos/archive.py @@ -421,7 +421,7 @@ class FileCacheArchive(Archive): (source, link_name, dest)) source_dir = os.path.dirname(link_name) - host_path_name = os.path.normpath(os.path.join(source_dir, source)) + host_path_name = os.path.realpath(os.path.join(source_dir, source)) dest_path_name = self.dest_path(host_path_name) if not os.path.exists(dest_path_name):
[archive] canonicalise paths for link follow up Ensure that the canonical path is used when processing link follow up actions: the actual link path may contain one or more levels of symbolic links, leading to broken links if the link target path is assumed to be relative to the containing directory.
py
diff --git a/examples/perft/perft.py b/examples/perft/perft.py index <HASH>..<HASH> 100755 --- a/examples/perft/perft.py +++ b/examples/perft/perft.py @@ -18,8 +18,10 @@ import sys def perft(depth, board): - if board.is_variant_win() or board.is_variant_draw() or board.is_variant_loss(): + if depth < 1: return 1 + elif board.is_variant_win() or board.is_variant_draw() or board.is_variant_loss(): + return 0 elif depth > 1: count = 0 @@ -36,8 +38,10 @@ def perft(depth, board): def parallel_perft(pool, depth, board): - if board.is_variant_win() or board.is_variant_draw() or board.is_variant_loss(): + if depth < 1: return 1 + elif board.is_variant_win() or board.is_variant_draw() or board.is_variant_loss(): + return 0 elif depth > 1: def successors(board): for move in board.legal_moves:
Fix perft with regard to variant end
py
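The ordering in the perft fix matters: checking the variant end before the depth used to count early-terminated lines as 1 instead of 0. A stand-alone sketch with a hypothetical board interface (push/pop/legal_moves stand in for python-chess, not its real API):

    def perft(depth, board):
        if depth < 1:
            return 1   # a full move sequence was enumerated: count it
        if board.is_game_over():
            return 0   # game ended early: nothing reaches the target depth
        count = 0
        for move in board.legal_moves():
            board.push(move)
            count += perft(depth - 1, board)
            board.pop()
        return count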
diff --git a/tests/test_service.py b/tests/test_service.py index <HASH>..<HASH> 100644 --- a/tests/test_service.py +++ b/tests/test_service.py @@ -129,7 +129,6 @@ class TestService: etesync.sync() # Reset the db - prev_db = etesync._database etesync._init_db(TEST_DB) assert len(list(etesync.list())) == 0 etesync.sync() @@ -141,8 +140,8 @@ class TestService: etesync.sync() # Reset the db - etesync._set_db(prev_db) - assert len(list(etesync.list())) == 2 + etesync._init_db(TEST_DB) + assert len(list(etesync.list())) == 0 etesync.sync() assert len(list(etesync.list())) == 1
Tests: fix broken tests that relied on undefined behaviour.
py
diff --git a/synapse/lib/ingest.py b/synapse/lib/ingest.py index <HASH>..<HASH> 100644 --- a/synapse/lib/ingest.py +++ b/synapse/lib/ingest.py @@ -221,7 +221,6 @@ class Ingest(EventBus): raise Exception('Ingest Info Not Found: %s' % (path,)) for data in self._openDataSorc(path,info): - #data = self._openDataSorc(path,info) root = s_datapath.DataPath(data) self._ingDataInfo(core, root, gest)
and another comment/cruft line
py
diff --git a/serenata_toolbox/chamber_of_deputies/speeches_dataset.py b/serenata_toolbox/chamber_of_deputies/speeches_dataset.py index <HASH>..<HASH> 100644 --- a/serenata_toolbox/chamber_of_deputies/speeches_dataset.py +++ b/serenata_toolbox/chamber_of_deputies/speeches_dataset.py @@ -35,7 +35,7 @@ class SpeechesDataset: xml = urllib.request.urlopen(url) tree = ET.ElementTree(file=xml) - records = self.__parse_speeches(tree.getroot()) + records = self._parse_speeches(tree.getroot()) return pd.DataFrame(records, columns=[ 'session_code', @@ -52,7 +52,7 @@ class SpeechesDataset: 'speech_insertion_num' ]) - def __parse_speeches(self, root): + def _parse_speeches(self, root): for session in root: session_code = xml_extract_text(session, 'codigo') session_date = xml_extract_date(session, 'data')
Remove double underscore from `__parse_speeches` method and calls
py
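Context for the rename: double-underscore attributes trigger Python's name mangling, which blocks subclass overrides and complicates external access. A minimal demonstration:

    class Dataset:
        def run(self):
            return self.__parse()  # compiled as self._Dataset__parse()

        def __parse(self):
            return "base"

    class Child(Dataset):
        def __parse(self):  # mangled to _Child__parse; run() never sees it
            return "child"

    assert Child().run() == "base"  # with one underscore it would override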
diff --git a/salt/returners/mysql.py b/salt/returners/mysql.py index <HASH>..<HASH> 100644 --- a/salt/returners/mysql.py +++ b/salt/returners/mysql.py @@ -246,6 +246,11 @@ def returner(ret): ''' Return data to a mysql server ''' + # if a minion is returning a standalone job, get a jobid + if ret['jid'] == 'req': + ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) + save_load(ret['jid'], ret) + try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns`
Add jid=req handling for mysql returner. It should also store the return jid into the jid list table.
py
diff --git a/fabric/group.py b/fabric/group.py index <HASH>..<HASH> 100644 --- a/fabric/group.py +++ b/fabric/group.py @@ -2,7 +2,7 @@ from invoke.vendor.six.moves.queue import Queue from invoke.util import ExceptionHandlingThread -from fabric import Connection +from fabric import Connection, Result class Group(list): @@ -191,4 +191,29 @@ class GroupResult(dict): - Of note, these attributes allow high level logic, e.g. ``if mygroup.run('command').failed`` and so forth. """ - pass + def __init__(self, *args, **kwargs): + super(dict, self).__init__(*args, **kwargs) + self._successes = {} + self._failures = {} + + def _bifurcate(self): + # Short-circuit to avoid reprocessing every access. + if self._successes or self._failures: + return + # TODO: if we ever expect .succeeded/.failed to be useful before a + # GroupResult is fully initialized, this needs to become smarter. + for key, value in self.items(): + if isinstance(value, BaseException): + self._failures[key] = value + else: + self._successes[key] = value + + @property + def succeeded(self): + self._bifurcate() + return self._successes + + @property + def failed(self): + self._bifurcate() + return self._failures
Implement GroupResult.(succeeded/failed)
py
diff --git a/harpoon/actions.py b/harpoon/actions.py index <HASH>..<HASH> 100644 --- a/harpoon/actions.py +++ b/harpoon/actions.py @@ -80,8 +80,13 @@ def pull_arbitrary(collector, image, **kwargs): authentication = collector.configuration.get("authentication", NotSpecified) for index, (image, image_index) in enumerate(image_indexes): + tag = sb.NotSpecified + if ":" in image: + image, tag = image.split(":", 1) + image = { "image_name": image + , "tag": tag , "harpoon": collector.configuration["harpoon"] , "commands": ["FROM scratch"] , "image_index": image_index
Work out tag from image name when pulling all external
py
diff --git a/test_pal.py b/test_pal.py index <HASH>..<HASH> 100644 --- a/test_pal.py +++ b/test_pal.py @@ -350,7 +350,7 @@ class TestPAL(unittest.TestCase) : self.assertRaises( ValueError, pal.caldj, 1970, 13, 1 ) self.assertRaises( ValueError, pal.caldj, 1970, 1, 32 ) - def test_caf2r(self): + def test_daf2r(self): dr = pal.daf2r( 76, 54, 32.1 ) self.assertAlmostEqual( dr, 1.342313819975276, 12 )
fixed name of method test_daf2r
py
diff --git a/core/setup.py b/core/setup.py index <HASH>..<HASH> 100644 --- a/core/setup.py +++ b/core/setup.py @@ -25,7 +25,7 @@ setup(name='eo-learn-core', author_email='[email protected]', license='MIT', packages=find_packages(), - package_data={'eolearn': ['eolearn/core/report_templates']}, + package_data={'eolearn': ['core/report_templates/report.html']}, include_package_data=True, install_requires=parse_requirements("requirements.txt"), zip_safe=False)
Really include the report template in setup.py
py
diff --git a/cassiopeia/type/dto/common.py b/cassiopeia/type/dto/common.py index <HASH>..<HASH> 100755 --- a/cassiopeia/type/dto/common.py +++ b/cassiopeia/type/dto/common.py @@ -1,6 +1,10 @@ import json class CassiopeiaDto(object): + def __init__(self, dictionary): + for k,v in dictionary.items(): + setattr(self, k, v) + def to_json(self): return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4) @@ -14,4 +18,4 @@ class CassiopeiaDto(object): return self.__dict__ == other.__dict__ def __ne__(self, other): - return self.__dict__ != other.__dict__ \ No newline at end of file + return self.__dict__ != other.__dict__
Made a tiny modification to add a default __init__ for CassiopeiaDto. This is (and should be) overwritten in everything inside of cassio, but it lets you do some black magic to make everything mutable
py
diff --git a/charmhelpers/fetch/ubuntu.py b/charmhelpers/fetch/ubuntu.py index <HASH>..<HASH> 100644 --- a/charmhelpers/fetch/ubuntu.py +++ b/charmhelpers/fetch/ubuntu.py @@ -454,6 +454,9 @@ def _add_apt_repository(spec): :param spec: the parameter to pass to add_apt_repository """ + if '{series}' in spec: + series = lsb_release()['DISTRIB_CODENAME'] + spec = spec.replace('{series}', series) _run_with_retries(['add-apt-repository', '--yes', spec])
Add {series} support to _add_apt_repository (#<I>). Allows sources to include a `{series}` placeholder that will be populated automatically with the correct Ubuntu series. Closes #<I>
py
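Sketch of the substitution added above, with a stubbed lsb_release() standing in for charmhelpers' real helper (the codename value is made up):

    def lsb_release():
        # Stub: the real charmhelpers function parses `lsb_release -a`.
        return {'DISTRIB_CODENAME': 'focal'}

    def expand_series(spec):
        if '{series}' in spec:
            spec = spec.replace('{series}', lsb_release()['DISTRIB_CODENAME'])
        return spec

    print(expand_series('deb http://archive.example.com/ubuntu {series} main'))
    # -> deb http://archive.example.com/ubuntu focal main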
diff --git a/vendor/visualmetrics.py b/vendor/visualmetrics.py index <HASH>..<HASH> 100755 --- a/vendor/visualmetrics.py +++ b/vendor/visualmetrics.py @@ -549,9 +549,9 @@ def find_render_start(directory, orange_file, gray_file): right_margin = 10 bottom_margin = 25 if height > 400 or width > 400: - top = int(math.ceil(float(height) * 0.03)) - right_margin = int(math.ceil(float(width) * 0.04)) - bottom_margin = int(math.ceil(float(width) * 0.04)) + top = max(top, int(math.ceil(float(height) * 0.04))) + right_margin = max(right_margin, int(math.ceil(float(width) * 0.04))) + bottom_margin = max(bottom_margin, int(math.ceil(float(width) * 0.04))) height = max(height - top - bottom_margin, 1) left = 0 width = max(width - right_margin, 1)
merging upstream fixes (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ versioneer.VCS = "git" versioneer.versionfile_source = "src/wormhole/_version.py" versioneer.versionfile_build = "wormhole/_version.py" versioneer.tag_prefix = "" -versioneer.parentdir_prefix = "wormhole-sync" +versioneer.parentdir_prefix = "magic-wormhole" commands = versioneer.get_cmdclass() @@ -16,7 +16,7 @@ setup(name="magic-wormhole", author="Brian Warner", author_email="[email protected]", license="MIT", - url="https://github.com/warner/wormhole-sync", + url="https://github.com/warner/magic-wormhole", package_dir={"": "src"}, packages=["wormhole", "wormhole.blocking", "wormhole.scripts",
setup.py: finish renaming
py
diff --git a/superset/migrations/versions/3317e9248280_add_creation_method_to_reports_model.py b/superset/migrations/versions/3317e9248280_add_creation_method_to_reports_model.py index <HASH>..<HASH> 100644 --- a/superset/migrations/versions/3317e9248280_add_creation_method_to_reports_model.py +++ b/superset/migrations/versions/3317e9248280_add_creation_method_to_reports_model.py @@ -34,7 +34,7 @@ def upgrade(): with op.batch_alter_table("report_schedule") as batch_op: batch_op.add_column( sa.Column( - "creation_method", sa.VARCHAR(255), server_default="alert_reports", + "creation_method", sa.VARCHAR(255), server_default="alerts_reports", ) ) batch_op.create_index(
oops (#<I>)
py
diff --git a/ceph_deploy/conf/cephdeploy.py b/ceph_deploy/conf/cephdeploy.py index <HASH>..<HASH> 100644 --- a/ceph_deploy/conf/cephdeploy.py +++ b/ceph_deploy/conf/cephdeploy.py @@ -73,6 +73,11 @@ def create_stub(_path=None): class Conf(SafeConfigParser): + """ + Subclasses from SafeConfigParser to give a few helpers for the ceph-deploy + configuration. Specifically, it addresses the need to work with custom + sections that signal the usage of custom repositories. + """ reserved_sections = ['ceph-deploy-global', 'ceph-deploy-install']
update docstring for conf class
py
diff --git a/tests/unit/phases/MultiPhaseTest.py b/tests/unit/phases/MultiPhaseTest.py index <HASH>..<HASH> 100644 --- a/tests/unit/phases/MultiPhaseTest.py +++ b/tests/unit/phases/MultiPhaseTest.py @@ -195,6 +195,7 @@ class MultiPhaseTest: def test_multiphase_invalid_occupancy(self): m = op.phases.MultiPhase(network=self.net, phases=[self.water, self.air]) + # The next line ideally should throw an Exception, but warning for now m.set_occupancy(phase=self.water, Pvals=1.5, Tvals=2.5)
Add helper comment for one of the tests, for future reference
py
diff --git a/tinymce/widgets.py b/tinymce/widgets.py index <HASH>..<HASH> 100644 --- a/tinymce/widgets.py +++ b/tinymce/widgets.py @@ -123,7 +123,7 @@ class AdminTinyMCE(TinyMCE, admin_widgets.AdminTextareaWidget): def get_language_config(content_language=None): language = get_language() - language = parse_language(language) if language is not None else "en" + language = parse_language(language) if language is not None else "en_US" if content_language: content_language = content_language[:2] else: @@ -160,7 +160,7 @@ def get_language_config(content_language=None): def parse_language(lang: str) -> str: """If language code is in format xx-yy, convert it into xx_YY. - Otherwise just return unchanged language code. + Otherwise just return unchanged language code. Args: lang (str): The language code to be parsed.
BugFix: Change default language to meet standards
py
diff --git a/cyther/test.py b/cyther/test.py index <HASH>..<HASH> 100644 --- a/cyther/test.py +++ b/cyther/test.py @@ -41,7 +41,7 @@ def test_utilities(): test_dict_file() test_extract() #test_find() - #display_configure() + display_configure() display_direct() display_resources() print('<@test.py> All utility tests have been passed')
Re-enabled testing of 'configuration'
py
diff --git a/docs/source/conf.py b/docs/source/conf.py index <HASH>..<HASH> 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -48,9 +48,9 @@ copyright = u'2012, Doug Hellmann' # built documents. # # The short X.Y version. -version = '0.1' +version = '0.2' # The full version, including alpha/beta/rc tags. -release = '0.1' +release = '0.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.
fix version # in doc build script
py
diff --git a/wallace/models.py b/wallace/models.py index <HASH>..<HASH> 100644 --- a/wallace/models.py +++ b/wallace/models.py @@ -551,7 +551,10 @@ class Node(Base): .filter_by(destination_id=self.id, status="received", failed=False).all() info_ids = [t.info_id for t in transmissions] - return type.query.filter(type.id.in_(info_ids)).all() + if info_ids: + return type.query.filter(type.id.in_(info_ids)).all() + else: + return [] def transmissions(self, direction="outgoing", status="all"): """
received_infos only does a query if transmissions have been received
py
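The fix above short-circuits before building an IN query from an empty id list. A plain-Python toy of the same guard (dict rows stand in for ORM models):

    def fetch_by_ids(rows, ids):
        if not ids:
            return []  # skip the lookup entirely instead of matching IN ()
        wanted = set(ids)
        return [row for row in rows if row['id'] in wanted]

    rows = [{'id': 1}, {'id': 2}]
    print(fetch_by_ids(rows, []))   # []
    print(fetch_by_ids(rows, [2]))  # [{'id': 2}]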
diff --git a/cc_core/commons/schemas/engines/execution.py b/cc_core/commons/schemas/engines/execution.py index <HASH>..<HASH> 100644 --- a/cc_core/commons/schemas/engines/execution.py +++ b/cc_core/commons/schemas/engines/execution.py @@ -19,7 +19,7 @@ ccagency_schema = { 'outdir': {'type': 'string'}, 'protectedKeys': { 'type': 'array', - 'items': {'type', 'string'} + 'items': {'type': 'string'} } }, 'additionalProperties': False
fixed typo in ccagency schema
py
diff --git a/salt/cloud/__init__.py b/salt/cloud/__init__.py index <HASH>..<HASH> 100644 --- a/salt/cloud/__init__.py +++ b/salt/cloud/__init__.py @@ -1388,11 +1388,14 @@ class Cloud(object): if main_cloud_config is None: main_cloud_config = {} + mapped_providers = self.map_providers_parallel() profile_details = self.opts['profiles'][profile] + vms = {} + for prov in mapped_providers: + prov_name = mapped_providers[prov].keys()[0] + for node in mapped_providers[prov][prov_name]: + vms[node] = mapped_providers[prov][prov_name][node] alias, driver = profile_details['provider'].split(':') - mapped_providers = self.map_providers_parallel() - alias_data = mapped_providers.setdefault(alias, {}) - vms = alias_data.setdefault(driver, {}) provider_details = self.opts['providers'][alias][driver].copy() del provider_details['profiles']
Check all providers, not just the current one
py
diff --git a/pysparkling/partition.py b/pysparkling/partition.py index <HASH>..<HASH> 100644 --- a/pysparkling/partition.py +++ b/pysparkling/partition.py @@ -8,9 +8,11 @@ log = logging.getLogger(__name__) class Partition(object): def __init__(self, x, idx=None): self.index = idx - self._x = list(x) + self._x = x def x(self): + if not isinstance(self._x, list): + self._x = list(self._x) return self._x def hashCode(self): @@ -19,5 +21,5 @@ class Partition(object): def __getstate__(self): return { 'index': self.index, - '_x': self._x, + '_x': self.x(), }
Partition: own data, but lazily load it
py
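The shape of the pysparkling change, restated as a self-contained class: hold the raw iterable, materialize on first access, and force materialization when pickling so a generator never ends up in __getstate__.

    class Partition(object):
        def __init__(self, x, idx=None):
            self.index = idx
            self._x = x  # possibly a lazy iterable; not copied up front

        def x(self):
            if not isinstance(self._x, list):
                self._x = list(self._x)  # materialize once, on first use
            return self._x

        def __getstate__(self):
            # Generators cannot be pickled, so serialize the realized list.
            return {'index': self.index, '_x': self.x()}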
diff --git a/oceansdb/etopo.py b/oceansdb/etopo.py index <HASH>..<HASH> 100644 --- a/oceansdb/etopo.py +++ b/oceansdb/etopo.py @@ -70,7 +70,7 @@ class ETOPO_var_nc(object): self.load_dims() def keys(self): - return ['depth'] + return ['elevation'] def load_dims(self): self.dims = { @@ -105,7 +105,7 @@ class ETOPO_var_nc(object): #for v, vin in zip(var, varin): # subset[v] = ma.asanyarray( # [self.ncs[0][vin][yn, xn]]) - subset = {'depth': self.ncs[0].variables['ROSE'][yn, xn]} + subset = {'elevation': self.ncs[0].variables['ROSE'][yn, xn]} return subset, dims
Renaming depth->elevation at ETOPO(). ETOPO gives elevations, so below sea level is < 0. Elevation makes that explicit.
py
diff --git a/skorch/tests/test_net.py b/skorch/tests/test_net.py index <HASH>..<HASH> 100644 --- a/skorch/tests/test_net.py +++ b/skorch/tests/test_net.py @@ -2608,7 +2608,7 @@ class TestNeuralNet: rv = np.random.random((20, 5)) net.forward_iter = lambda *args, **kwargs: (torch.as_tensor(rv) for _ in range(2)) - # 2 batches, mock return value hs shape 20,5 thus y_proba has + # 2 batches, mock return value has shape 20,5 thus y_proba has # shape 40,5 y_proba = net.predict_proba(X) assert y_proba.shape == (40, 5)
Reviewer comment: fix typo in comment
py
diff --git a/exrex.py b/exrex.py index <HASH>..<HASH> 100644 --- a/exrex.py +++ b/exrex.py @@ -80,7 +80,6 @@ def _gen(d, limit=20, count=False): strings *= len(subs) ret = comb(ret, subs) elif i[0] == 'max_repeat': - # TODO limit range max chars = filter(None, _gen(list(i[1][2]), limit)) if i[1][1]+1 - i[1][0] > limit: ran = xrange(i[1][0], i[1][0]+limit+1) @@ -88,7 +87,7 @@ def _gen(d, limit=20, count=False): ran = xrange(i[1][0], i[1][1]+1) if count: for i in ran: - strings *= pow(len(chars)+1, i) + strings *= pow(len(chars), i) ret = prods(ret, ran, chars) elif i[0] == 'branch': subs = chain.from_iterable(_gen(list(x), limit) for x in i[1][1])
[mod] count mods, comments removed
py
diff --git a/bogo/new_bogo_engine.py b/bogo/new_bogo_engine.py index <HASH>..<HASH> 100644 --- a/bogo/new_bogo_engine.py +++ b/bogo/new_bogo_engine.py @@ -45,7 +45,7 @@ def process_key(string, key, raw_key_sequence=[], config=None): def default_return(): return string + key - if config == None: + if config is None: return default_return() # NOTE to whoever reading this: @@ -118,7 +118,7 @@ def get_transformation_list(key, im, raw_key_sequence): for i, trans in enumerate(trans_list): if trans[0] == '<' and key.isalpha(): - trans_list[i] = trans[0] + utils.change_case(trans[1], + trans_list[i] = trans[0] + utils.change_case(trans[1], int(key.isupper())) if trans_list == ['_']:
PEP8 in new_bogo_engine.py and more test cases
py
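Why `is None` (PEP 8) rather than `== None`: identity testing cannot be hijacked by a custom __eq__. A contrived example:

    class Weird:
        def __eq__(self, other):
            return True  # claims equality with everything, None included

    w = Weird()
    print(w == None)  # True, misleading
    print(w is None)  # False, the intended test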
diff --git a/astral/astral.py b/astral/astral.py index <HASH>..<HASH> 100644 --- a/astral/astral.py +++ b/astral/astral.py @@ -96,7 +96,7 @@ except ImportError: __all__ = ['Location','AstralGeocoder','GoogleGeocoder','Astral','AstralError'] -__version__ = "0.7.3" +__version__ = "0.7.4" __author__ = "Simon Kennedy <[email protected]>" # name,region,longitude,latitude,timezone,elevation
Bumped version number to <I>
py
diff --git a/scrape/scrape.py b/scrape/scrape.py index <HASH>..<HASH> 100755 --- a/scrape/scrape.py +++ b/scrape/scrape.py @@ -67,6 +67,7 @@ BASEDIR = "awacs" IGNORED_SERVICE_ALIASES = { "Amazon Kinesis Analytics V2": "kinesisanalytics", "Amazon Pinpoint Email Service": "ses", + "AWS IoT Greengrass V2": "greengrass", "AWS Marketplace Catalog": "aws-marketplace", "AWS Marketplace Entitlement Service": "aws-marketplace", "AWS Marketplace Image Building Service": "aws-marketplace", @@ -143,7 +144,7 @@ async def collect_existing_actions() -> Dict[str, Set[str]]: async def collect_service_info() -> List[httpx.Response]: - max_connections = 5 + max_connections = 2 async with httpx.AsyncClient( http2=True, limits=httpx.Limits(max_connections=max_connections),
Fix scrape script breakage - Map "AWS IoT Greengrass V2" to "greengrass" - Reduce max connections to prevent server busy captcha prompt
py
diff --git a/ansible_runner/interface.py b/ansible_runner/interface.py index <HASH>..<HASH> 100644 --- a/ansible_runner/interface.py +++ b/ansible_runner/interface.py @@ -140,7 +140,7 @@ def run(**kwargs): :param ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. :param json_mode: Store event data in place of stdout on the console and in the stdout file - :param playbook: The playbook (either supplied here as a list or string... or as a path relative to + :param playbook: The playbook (either a list or dictionary of plays, or as a path relative to ``private_data_dir/project``) that will be invoked by runner when executing Ansible. :param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible. :param module_args: The module arguments that will be supplied to ad-hoc mode.
Correct docstring. A string isn't valid.
py
diff --git a/awkward/array/jagged.py b/awkward/array/jagged.py index <HASH>..<HASH> 100644 --- a/awkward/array/jagged.py +++ b/awkward/array/jagged.py @@ -92,7 +92,7 @@ class JaggedArray(awkward.array.base.AwkwardArrayWithContent): changes[-1] = len(parents) changes[1:-1] = tmp - length = parents.max() + 1 + length = parents.max() + 1 if parents.size > 0 else 0 starts = cls.numpy.zeros(length, dtype=cls.JaggedArray.fget(None).INDEXTYPE) counts = cls.numpy.zeros(length, dtype=cls.JaggedArray.fget(None).INDEXTYPE)
Also patch zero-length parents, although it may never get used
py
diff --git a/leancloud/app_router.py b/leancloud/app_router.py index <HASH>..<HASH> 100644 --- a/leancloud/app_router.py +++ b/leancloud/app_router.py @@ -15,10 +15,11 @@ from leancloud import utils class AppRouter(object): def __init__(self, app_id, region): + self.app_id = app_id + self.region = region self.hosts = {} self.session = requests.Session() self.lock = threading.Lock() - self.app_id = app_id self.expired_at = 0 if region == 'US': self.hosts['api'] = 'us-api.leancloud.cn' @@ -41,16 +42,14 @@ class AppRouter(object): raise RuntimeError('invalid region: {}'.format(region)) def get(self, type_): + if self.region == 'US': + # US region dose not support app router stuff + return self.hosts[type_] + with self.lock: - expired = time.time() > self.expired_at - is_expired = False - if expired: + if time.time() > self.expired_at: self.expired_at += 600 - is_expired = True - if is_expired: - self.refresh() - threading.Thread(target=self.refresh).start() - with self.lock: + threading.Thread(target=self.refresh).start() return self.hosts[type_] def refresh(self):
fix: don't refresh app router on US region
py
diff --git a/requests_oauthlib/oauth1_session.py b/requests_oauthlib/oauth1_session.py index <HASH>..<HASH> 100644 --- a/requests_oauthlib/oauth1_session.py +++ b/requests_oauthlib/oauth1_session.py @@ -33,7 +33,6 @@ class TokenRequestDenied(ValueError): super(TokenRequestDenied, self).__init__(message) self.response = response - @property def status_code(self): """For backwards-compatibility purposes""" return self.response.status_code @@ -171,7 +170,6 @@ class OAuth1Session(requests.Session): ) self.auth = self._client - @property def token(self): oauth_token = self._client.client.resource_owner_key oauth_token_secret = self._client.client.resource_owner_secret @@ -191,7 +189,6 @@ class OAuth1Session(requests.Session): def token(self, value): self._populate_attributes(value) - @property def authorized(self): """Boolean that indicates whether this session has an OAuth token or not. If `self.authorized` is True, you can reasonably expect
will this fix coverage diff of -<I>
py
diff --git a/test_isort.py b/test_isort.py index <HASH>..<HASH> 100644 --- a/test_isort.py +++ b/test_isort.py @@ -1122,8 +1122,8 @@ def test_keep_comments(): test_input = ("from a import b, c # My Comment1\n" "from a import c, d # My Comment2 is really really really really long\n") assert SortImports(file_contents=test_input, line_length=45).output == \ - ("from a import (b, # My Comment1; My Comment2 is really really really really long\n" - " c, d)\n") + ("from a import ( # My Comment1; My Comment2 is really really really really long\n" + " b, c, d)\n") # Test that comments are not stripped from 'import ... as ...' by default test_input = ("from a import b as bb # b comment\n"
Fixed test for fallback to mode 4 change.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ def get_version(module='spyder_kernels'): REQUIREMENTS = ['ipykernel>=4.8.2', 'pyzmq>=17', - 'jupyter-client>=5.2.5', + 'jupyter-client>=5.3.4', 'cloudpickle', 'wurlitzer>=1.0.3;platform_system!="Windows"']
Require more recent version of jupyter-client. This avoids an ugly bug on Windows
py
diff --git a/dnanexus_utils.py b/dnanexus_utils.py index <HASH>..<HASH> 100755 --- a/dnanexus_utils.py +++ b/dnanexus_utils.py @@ -376,7 +376,7 @@ class DxSeqResults: if not os.path.isdir(download_dir): os.makedirs(download_dir) logger.info("Downloading the FASTQC reports to {download_dir}.".format(download_dir=download_dir)) - dxpy.download_folder(project=self.dx_project_id,destdir=download_dir,folder=self.DX_FASTQC_FOLDER,overwrite=False) + dxpy.download_folder(project=self.dx_project_id,destdir=download_dir,folder=self.DX_FASTQC_FOLDER,overwrite=True) #rename the downloaded folder to ${download_dir}/FASTQC os.rename(os.path.join(download_dir,self.DX_FASTQC_FOLDER.split("/")[-1]),os.path.join(download_dir,"FASTQC")) return os.path.join(download_dir,"FASTQC")
Set the downloading of FASTQC reports to overwrite if need be
py
diff --git a/datajoint/autopopulate.py b/datajoint/autopopulate.py index <HASH>..<HASH> 100644 --- a/datajoint/autopopulate.py +++ b/datajoint/autopopulate.py @@ -81,6 +81,7 @@ class AutoPopulate: jobs = self.connection.jobs[self.target.database] if reserve_jobs else None todo -= self.target.proj() + n_to_populate = len(todo) keys = todo.fetch.keys() if order == "reverse": keys = list(keys) @@ -89,6 +90,7 @@ class AutoPopulate: keys = list(keys) random.shuffle(keys) + logger.info('Found %d keys to populate' % n_to_populate) for key in keys: if not reserve_jobs or jobs.reserve(self.target.table_name, key): self.connection.start_transaction()
Report number of missing keys to be populated
py
diff --git a/Lib/ufo2ft/markFeatureWriter.py b/Lib/ufo2ft/markFeatureWriter.py index <HASH>..<HASH> 100644 --- a/Lib/ufo2ft/markFeatureWriter.py +++ b/Lib/ufo2ft/markFeatureWriter.py @@ -160,19 +160,21 @@ class MarkFeatureWriter(object): # nothing to do, don't write empty feature return featureName = "mkmk" if isMkmk else "mark" - - lines.append("feature %s {" % featureName) + feature = [] for i, anchorPair in enumerate(anchorList): lookupName = "%s%d" % (featureName, i + 1) - self._addMarkLookup(lines, lookupName, isMkmk, anchorPair) + self._addMarkLookup(feature, lookupName, isMkmk, anchorPair) if not isMkmk: for i, anchorPairs in enumerate(self.ligaAnchorList): lookupName = "mark2liga%d" % (i + 1) - self._addMarkToLigaLookup(lines, lookupName, anchorPairs) + self._addMarkToLigaLookup(feature, lookupName, anchorPairs) - lines.append("} %s;\n" % featureName) + if feature: + lines.append("feature %s {" % featureName) + lines.extend(feature) + lines.append("} %s;\n" % featureName) def setupAnchorPairs(self): """
[markFeatureWriter] skip writing mark/mkmk feature if empty. Sometimes empty blocks like this are emitted by the MarkFeatureWriter: feature mkmk { } mkmk; feaLib silently ignores them, while MakeOTF crashes on them with a syntax error. <URL>
py
diff --git a/src/CPDs.py b/src/CPDs.py index <HASH>..<HASH> 100644 --- a/src/CPDs.py +++ b/src/CPDs.py @@ -3,6 +3,7 @@ import numpy as np + class TabularCPD(): """Represents the CPD of a node in tabular form""" def __init__(self, cpd):
Fixed PEP8 non-compliance
py
diff --git a/paramiko/message.py b/paramiko/message.py index <HASH>..<HASH> 100644 --- a/paramiko/message.py +++ b/paramiko/message.py @@ -67,9 +67,6 @@ class Message (object): def asbytes(self): """ Return the byte stream content of this Message, as bytes. - - @return: the contents of this Message. - @rtype: bytes """ return self.packet.getvalue() @@ -241,8 +238,7 @@ class Message (object): """ Add an integer to the stream. - @param n: integer to add - @type n: int + :param int n: integer to add """ self.packet.write(struct.pack('>I', n)) return self
Some Epydoc->Sphinx stuff that came from the merge
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup setup(name='microsoftbotframework', - version='0.1.10', + version='0.1.11', description='A wrapper for the microsoft bot framework API', classifiers=[ 'Development Status :: 3 - Alpha',
Updated version to <I>
py
diff --git a/findiff/operators.py b/findiff/operators.py index <HASH>..<HASH> 100644 --- a/findiff/operators.py +++ b/findiff/operators.py @@ -270,7 +270,7 @@ class FinDiff(UnaryOperator): for off, w in zip(offsets, weights): multi_slice[dim] = i + off - yd[ref_multi_slice] += w * y[multi_slice] + yd[tuple(ref_multi_slice)] += w * y[tuple(multi_slice)] return yd @@ -288,9 +288,9 @@ class FinDiff(UnaryOperator): off_multi_slice = [all] * ndims off_multi_slice[dim] = s if abs(1 - w) < 1.E-14: - yd[ref_multi_slice] += y[off_multi_slice] + yd[tuple(ref_multi_slice)] += y[tuple(off_multi_slice)] else: - yd[ref_multi_slice] += w * y[off_multi_slice] + yd[tuple(ref_multi_slice)] += w * y[tuple(off_multi_slice)] def _shift_slice(self, sl, off, max_index):
Replace deprecated use of non-tuple sequences in multidimensional indexing
py
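Background for the tuple(...) wrapping: NumPy deprecated indexing an array with a non-tuple sequence of index objects (a FutureWarning from 1.15, an error in later releases). Small illustration:

    import numpy as np

    y = np.arange(12).reshape(3, 4)
    idx = [slice(None), 2]  # built dynamically, one entry per axis
    print(y[tuple(idx)])    # [ 2  6 10] -- column 2
    # y[idx] used to emit a FutureWarning and now fails outright.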
diff --git a/tests/test_modeling_xlm.py b/tests/test_modeling_xlm.py index <HASH>..<HASH> 100644 --- a/tests/test_modeling_xlm.py +++ b/tests/test_modeling_xlm.py @@ -297,7 +297,7 @@ class XLMModelTester: self.parent.assertListEqual(list(result["loss"].size()), []) self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.type_sequence_label_size]) - def create_and_check_xlm_for_token_classification( + def create_and_check_xlm_token_classif( self, config, input_ids, @@ -383,9 +383,9 @@ class XLMModelTest(ModelTesterMixin, unittest.TestCase): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) - def test_xlm_for_token_classification(self): + def test_xlm_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs) + self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs) @slow def test_model_from_pretrained(self):
rename the function to match the rest of the test convention (#<I>)
py
diff --git a/jupytext/combine.py b/jupytext/combine.py index <HASH>..<HASH> 100644 --- a/jupytext/combine.py +++ b/jupytext/combine.py @@ -36,6 +36,13 @@ def combine_inputs_with_outputs(nb_source, nb_outputs): if key not in nb_outputs_filtered_metadata: nb_source.metadata[key] = nb_outputs.metadata[key] + if nb_source.metadata.get('jupytext', {}).get('formats') or ext in ['.md', '.Rmd'] or not \ + nb_source.metadata.get('jupytext', {}).get('text_representation', {}).get('format_name'): + nb_source.metadata.get('jupytext', {}).pop('text_representation', None) + + if not nb_source.metadata.get('jupytext', {}): + nb_source.metadata.pop('jupytext', {}) + for cell in nb_source.cells: # Remove outputs to warranty that trust of returned notebook is that of second notebook if cell.cell_type == 'code':
Drop the text_representation when redundant with the format information
py
diff --git a/layouts/__init__.py b/layouts/__init__.py index <HASH>..<HASH> 100644 --- a/layouts/__init__.py +++ b/layouts/__init__.py @@ -33,7 +33,7 @@ from github import Github, GithubException ## Variables -__version__ = '0.4.3' +__version__ = '0.4.4' log = logging.getLogger(__name__) @@ -156,6 +156,7 @@ class Layouts: tar = tarfile.open(filepath) tar.extractall(cache_dir) os.rename(dirname_orig_path, dirname_path) + tar.close() # Remove tar.gz os.remove(filepath)
Missing tarfile close before trying to delete it - Incrementing to <I>
py
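The underlying rule in the tarfile fix: close the archive (or use it as a context manager) before deleting the file, since an open handle can make os.remove fail, notably on Windows. Equivalent context-manager form:

    import os
    import tarfile

    def extract_then_remove(filepath, cache_dir):
        with tarfile.open(filepath) as tar:  # closed even if extractall raises
            tar.extractall(cache_dir)
        os.remove(filepath)  # safe: no handle left open on the archive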
diff --git a/openquake/engine/celery_node_monitor.py b/openquake/engine/celery_node_monitor.py index <HASH>..<HASH> 100644 --- a/openquake/engine/celery_node_monitor.py +++ b/openquake/engine/celery_node_monitor.py @@ -93,11 +93,21 @@ class CeleryNodeMonitor(object): Check that the expected celery nodes are all up. The loop continues until the main thread keeps running. """ - while self.job_running: - time.sleep(self.interval) + while self.main_is_running(sleep=self.interval): live_nodes = set(celery.task.control.inspect().ping() or {}) if live_nodes < self.live_nodes: print >> sys.stderr, 'Cluster nodes not accessible: %s' % ( self.live_nodes - live_nodes) os.kill(os.getpid(), signal.SIGABRT) # commit suicide break + + def main_is_running(self, sleep): + """ + Check for 50 times during the sleep interval if the flag + self.job_running becomes false and then exit. + """ + for _ in range(50): + if not self.job_running: + break + time.sleep(sleep / 50.) + return self.job_running
Made the check_nodes thread more responsive
py
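The pattern generalizes: replace one long sleep with many short ones, checking a stop flag between slices. A minimal version (the 50-slice split mirrors the diff):

    import time

    def wait_while_running(interval, is_running, slices=50):
        # Sleep `interval` seconds total, but notice shutdown within
        # interval/slices seconds instead of a full interval.
        for _ in range(slices):
            if not is_running():
                break
            time.sleep(interval / slices)
        return is_running()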
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -73,7 +73,7 @@ if sys.version < '3': ) extras_require = { - 'rtree': ["Rtree==0.8.2"], + 'rtree': ["Rtree >=0.8.2, <0.8.4"], 'celery': ["celery >=3.1, <4.0"], 'plotting': ["matplotlib >=1.5"], }
Allow installation of Rtree <I> Former-commit-id: 0b9adbea<I>f1f0c0ef<I>acf5b2b<I>e6bc4c4f
py
diff --git a/eventsourcing/system.py b/eventsourcing/system.py index <HASH>..<HASH> 100644 --- a/eventsourcing/system.py +++ b/eventsourcing/system.py @@ -1,7 +1,17 @@ from abc import ABC, abstractmethod from collections import defaultdict from threading import Event, Lock, Thread -from typing import Dict, Iterable, Iterator, List, Set, Tuple, Type, TypeVar +from typing import ( + Dict, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + Type, + TypeVar, +) from eventsourcing.application import Application, NotificationLog, Section from eventsourcing.domain import Aggregate, AggregateEvent @@ -22,7 +32,7 @@ class ProcessEvent: new domain events that result from processing that notification. """ - def __init__(self, tracking: Tracking): + def __init__(self, tracking: Optional[Tracking] = None): """ Initalises the process event with the given tracking object. """
Adjusted the type annotation for the 'tracking' arg of the ProcessEvent class.
py
diff --git a/wafer/management/commands/wafer_add_default_groups.py b/wafer/management/commands/wafer_add_default_groups.py index <HASH>..<HASH> 100644 --- a/wafer/management/commands/wafer_add_default_groups.py +++ b/wafer/management/commands/wafer_add_default_groups.py @@ -14,6 +14,9 @@ class Command(BaseCommand): ('pages', 'change_page'), ('pages', 'add_file'), ('pages', 'delete_file'), ('pages', 'change_file'), ), + 'Page Content Editors': ( + ('pages', 'change_page'), + ), 'Talk Mentors': ( ('talks', 'change_talk'), ('talks', 'view_all_talks'), ('talks', 'edit_private_notes'), @@ -23,6 +26,9 @@ class Command(BaseCommand): ('talks', 'edit_private_notes'), ('talks', 'add_review'), ), + 'View All Talks': ( + ('talks', 'view_all_talks'), + ), 'Registration': (), }
Add a couple more possibly useful groups
py
diff --git a/generators/xml.py b/generators/xml.py index <HASH>..<HASH> 100644 --- a/generators/xml.py +++ b/generators/xml.py @@ -170,6 +170,18 @@ class Xml(Generator): ret.append(self.type_to_xml(arg.type, node.parent)) elem.append(ret) + def method_to_xml(self, node, elem): + self.function_to_xml(node, elem) + + if node.virtual: + elem.set('virtual', 'yes') + + if node.static: + elem.set('static', 'yes') + + if node.abstract: + elem.set('abstract', 'yes') + def typedef_to_xml(self, node, elem): elem.append(self.type_to_xml(node.type, node))
Set virtual, static, abstract on method
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from ast import literal_eval from setuptools import setup, find_packages, Extension import re @@ -8,9 +9,8 @@ def module_attr_re(attr): def grep_attr(body, attr): - return str(literal_eval(module_attr_re(attr).search( - body.decode("utf-8") - ).group(1))) + return literal_eval(module_attr_re(attr).search(body).group(1)) + with open("cider/__init__.py", "r") as f: body = f.read()
Fix Python 3 support (closes #6)
py
diff --git a/datapoint/Manager.py b/datapoint/Manager.py index <HASH>..<HASH> 100644 --- a/datapoint/Manager.py +++ b/datapoint/Manager.py @@ -254,7 +254,7 @@ class Manager(object): warning_message = 'This function is deprecated. Use get_nearest_forecast_site() instead' warn(warning_message, DeprecationWarning, stacklevel=2) - return self.get_nearest_site(latitude, longitude) + return self.get_nearest_forecast_site(latitude, longitude) def get_nearest_forecast_site(self, latitude=None, longitude=None): """
Fix erroneous recursion
py
diff --git a/flink-python/pyflink/datastream/tests/test_data_stream.py b/flink-python/pyflink/datastream/tests/test_data_stream.py index <HASH>..<HASH> 100644 --- a/flink-python/pyflink/datastream/tests/test_data_stream.py +++ b/flink-python/pyflink/datastream/tests/test_data_stream.py @@ -632,8 +632,6 @@ class DataStreamTests(object): self.map_state = runtime_context.get_map_state(map_state_descriptor) def process_element(self, value, ctx): - import time - time.sleep(1) current_value = self.value_state.value() self.value_state.update(value[0]) current_list = [_ for _ in self.list_state.get()]
[FLINK-<I>][python] Fix the unstable test test_keyed_process_function_with_state in PyFlink This closes #<I>.
py
diff --git a/shinken/objects/host.py b/shinken/objects/host.py index <HASH>..<HASH> 100644 --- a/shinken/objects/host.py +++ b/shinken/objects/host.py @@ -211,7 +211,7 @@ class Host(SchedulingItem): 'got_default_realm' : BoolProp(default=False), # use for having all contacts we have notified - 'notified_contacts': StringProp(default=set()), + 'notified_contacts': StringProp(default=set(),retention=True), 'in_scheduled_downtime': BoolProp(default=False, retention=True), 'in_scheduled_downtime_during_last_check': BoolProp(default=False, retention=True),
Fix notified_contacts for host
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ setup( author="Chris Gemignani", author_email="[email protected]", url="https://github.com/juiceinc/recipe", - packages=find_packages(), + packages=find_packages(include=["recipe*"]), include_package_data=True, license="MIT", classifiers=[
Avoid installing a top-level `tests` package in setup.py (#<I>)
py
diff --git a/hotdoc/extensions/gst/gst_extension.py b/hotdoc/extensions/gst/gst_extension.py index <HASH>..<HASH> 100644 --- a/hotdoc/extensions/gst/gst_extension.py +++ b/hotdoc/extensions/gst/gst_extension.py @@ -1000,7 +1000,8 @@ class GstExtension(Extension): filename=plugin_name, extra={'gst-element-name': pagename}, rank=str(element['rank']), author=element['author'], - classification=element['klass'], plugin=plugin['filename'], + classification=element['klass'], + plugin=plugin_name, aliases=aliases, package=plugin['package'], interfaces=interfaces) @@ -1042,7 +1043,7 @@ class GstExtension(Extension): GstPluginSymbol, description=plugin['description'], display_name=plugin_name, - unique_name='plugin-' + plugin['filename'], + unique_name='plugin-' + plugin_name, license=plugin['license'], package=plugin['package'], filename=plugin['filename'],
gst: Use the plugin name to reference plugins, instead of the filename
py
diff --git a/parcalc/parcalc.py b/parcalc/parcalc.py index <HASH>..<HASH> 100644 --- a/parcalc/parcalc.py +++ b/parcalc/parcalc.py @@ -60,7 +60,7 @@ import time import os import tempfile import shutil -import copy +from copy import deepcopy from subprocess import check_output from contextlib import contextmanager @@ -310,7 +310,7 @@ class __PCalcProc(Process): def run(self): with work_dir(self.place) : n,system=self.iq.get() - system.set_calculator(copy.deepcopy(self.calc)) + system.set_calculator(deepcopy(self.calc)) system.get_calculator().block=True system.get_calculator().working_dir=self.place #print("Start at :", self.place) @@ -368,7 +368,7 @@ def ParCalculate(systems,calc,cleanup=True,block=True,prefix="Calc_"): basedir=os.getcwd() res=[] for n,s in enumerate(sysl): - s.set_calculator(copy.deepcopy(calc)) + s.set_calculator(deepcopy(calc)) s.get_calculator().block=block place=tempfile.mkdtemp(prefix=prefix, dir=basedir) os.chdir(place) @@ -396,7 +396,6 @@ if __name__ == '__main__': from ase.lattice.spacegroup import crystal from ase.units import GPa import elastic - from elastic.parcalc import ParCalculate, ClusterVasp import numpy from pylab import *
Directly import deepcopy. Correct import in tests for new package structure.
py
diff --git a/azure-mgmt-storage/azure/mgmt/storage/v2016_12_01/storage_management_client.py b/azure-mgmt-storage/azure/mgmt/storage/v2016_12_01/storage_management_client.py index <HASH>..<HASH> 100755 --- a/azure-mgmt-storage/azure/mgmt/storage/v2016_12_01/storage_management_client.py +++ b/azure-mgmt-storage/azure/mgmt/storage/v2016_12_01/storage_management_client.py @@ -61,9 +61,9 @@ class StorageManagementClient(object): :vartype config: StorageManagementClientConfiguration :ivar storage_accounts: StorageAccounts operations - :vartype storage_accounts: .operations.StorageAccountsOperations + :vartype storage_accounts: azure.mgmt.storage.v2016_12_01.operations.StorageAccountsOperations :ivar usage: Usage operations - :vartype usage: .operations.UsageOperations + :vartype usage: azure.mgmt.storage.v2016_12_01.operations.UsageOperations :param credentials: Credentials needed for the client to connect to Azure. :type credentials: :mod:`A msrestazure Credentials
Fix relative links in azure-mgmt-storage
py
diff --git a/util/compile.py b/util/compile.py index <HASH>..<HASH> 100755 --- a/util/compile.py +++ b/util/compile.py @@ -27,6 +27,10 @@ for language in languages: included_languages.append(language) js_files_to_include.append(path) +closure_path = '/usr/local/compiler-latest/compiler.jar' +if not os.path.isfile(closure_path): + sys.exit('could not find closure compiler at ' + closure_path) + print 'waiting for closure compiler...' proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE) output, err = proc.communicate()
Throw error if closure compiler is not installed
py
diff --git a/benchbuild/utils/__init__.py b/benchbuild/utils/__init__.py index <HASH>..<HASH> 100644 --- a/benchbuild/utils/__init__.py +++ b/benchbuild/utils/__init__.py @@ -6,13 +6,13 @@ get deleted afterwards. import sys import logging from types import ModuleType -from plumbum.commands.base import BoundCommand +from plumbum.machines.local import LocalCommand __ALIASES__ = {"unionfs": ["unionfs_fuse", "unionfs"]} LOG = logging.getLogger(__name__) -class ErrorCommand(BoundCommand): +class ErrorCommand(LocalCommand): """ A command that raises an exception when it gets called. This allows us to call the study with experiments who use incorrect imports, @@ -25,6 +25,11 @@ class ErrorCommand(BoundCommand): LOG.error("Unable to import a needed module.") raise AttributeError(__name__ + ".cmd") + def popen(self, *args, **kwargs): + """Simply raises the AttributeError for a missing command.""" + LOG.error("Unable to import a needed module.") + raise AttributeError(__name__ + ".cmd") + ERROR = ErrorCommand(__name__ + ".cmd", ErrorCommand.__doc__) @@ -89,5 +94,5 @@ del sys del logging del ModuleType del CommandAlias -del BoundCommand +del LocalCommand del ErrorCommand
fix: import a working ErrorCommand, if normal import fails. We already return an ErrorCommand object when we fail to import the requested command. However, the existing code fails to return a command that uses the plumbum API correctly. This triggers an exception when we deal with portage_gen tests and don't have a working uchroot environment available.
py
diff --git a/spyder_notebook/utils/nbopen.py b/spyder_notebook/utils/nbopen.py index <HASH>..<HASH> 100644 --- a/spyder_notebook/utils/nbopen.py +++ b/spyder_notebook/utils/nbopen.py @@ -54,7 +54,11 @@ def nbopen(filename): command = ['jupyter', 'notebook', '--no-browser', '--notebook-dir={}'.format(nbdir), '--NotebookApp.password='] - proc = subprocess.Popen(command) + if os.name == 'nt': + creation_flag = 0x08000000 # CREATE_NO_WINDOW + else: + creation_flag = 0 # Default value + proc = subprocess.Popen(command, creationflags=creation_flag) atexit.register(proc.terminate) # Wait ~10 secs for the server to be up
Add flag to not show a console window on Windows.
py
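Reusable form of the flag handling above; 0x08000000 is Windows' CREATE_NO_WINDOW, and 0 (the default) is used elsewhere because creationflags only has effect on Windows:

    import os
    import subprocess

    def popen_hidden_console(command):
        creationflags = 0x08000000 if os.name == 'nt' else 0  # CREATE_NO_WINDOW
        return subprocess.Popen(command, creationflags=creationflags)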
diff --git a/tests/test_program.py b/tests/test_program.py index <HASH>..<HASH> 100644 --- a/tests/test_program.py +++ b/tests/test_program.py @@ -209,6 +209,7 @@ class TestProgramFeatures(object): def test_triggerlist_targetlist_change2_namever(self, mysys): # TODO: investigate failure in https://travis-ci.org/tuomas2/automate/jobs/245124552 + # and https://travis-ci.org/tuomas2/automate/jobs/246254565 # TODO: check also similar tests above. p = mysys.p #add = {p} if isinstance(p, StatusObject) else set()
Add TODO about another failed Travis build
py
diff --git a/spacy/_ml.py b/spacy/_ml.py index <HASH>..<HASH> 100644 --- a/spacy/_ml.py +++ b/spacy/_ml.py @@ -243,8 +243,9 @@ class PrecomputableAffine(Model): def link_vectors_to_models(vocab): vectors = vocab.vectors if vectors.name is None: - raise ValueError( - "Unnamed vectors -- this won't allow multiple vectors " + vectors.name = VECTORS_KEY + print( + "Warning: Unnamed vectors -- this won't allow multiple vectors " "models to be loaded. (Shape: (%d, %d))" % vectors.data.shape) ops = Model.ops for word in vocab:
Warn and fall back if vectors have no name
py
diff --git a/python/ray/_private/services.py b/python/ray/_private/services.py index <HASH>..<HASH> 100644 --- a/python/ray/_private/services.py +++ b/python/ray/_private/services.py @@ -253,7 +253,8 @@ def get_address_info_from_redis_helper(redis_address, client_node_ip_address = client_info["NodeManagerAddress"] if (client_node_ip_address == node_ip_address or (client_node_ip_address == "127.0.0.1" - and redis_ip_address == get_node_ip_address())): + and redis_ip_address == get_node_ip_address()) + or client_node_ip_address == redis_ip_address): relevant_client = client_info break if relevant_client is None:
added address resolution fix for running in docker containers (#<I>) * added address resolution fix for running in docker containers * added address resolution fix for running in docker containers (java) * Update RayNativeRuntime.java
py
diff --git a/source/rafcon/statemachine/library_manager.py b/source/rafcon/statemachine/library_manager.py index <HASH>..<HASH> 100644 --- a/source/rafcon/statemachine/library_manager.py +++ b/source/rafcon/statemachine/library_manager.py @@ -97,7 +97,7 @@ class LibraryManager(Observable): # Replace environment variables path = os.path.expandvars(path) # If the path is relative, assume it is relative to the config file directory - if not path.startswith('/'): + if not os.path.isabs(path): path = os.path.join(config.global_config.path, path) # Clean path, e.g. replace /./ with / path = os.path.abspath(path)
Make relative path check OS independent. Instead of checking whether a path starts with '/', use the Python method os.path.isabs(), which also works on e.g. Windows.
py
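Worked version of the portability point in the commit message; on Windows an absolute path like C:\Users never starts with '/':

    import os.path

    def resolve(path, config_dir):
        if not os.path.isabs(path):  # portable, unlike path.startswith('/')
            path = os.path.join(config_dir, path)
        return os.path.abspath(path)

    print(resolve('libraries', '/etc/rafcon'))  # POSIX: /etc/rafcon/libraries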
diff --git a/bibliopixel/builder/builder.py b/bibliopixel/builder/builder.py index <HASH>..<HASH> 100644 --- a/bibliopixel/builder/builder.py +++ b/bibliopixel/builder/builder.py @@ -100,4 +100,4 @@ class Builder(SavedDescription): run = {'run': {'threaded': False}} self.project = project.project( self.desc, run, root_file=self.project_file) - self.project.start() + self.project.run()
Clean projects up when called from Builder * Part of #<I>
py
diff --git a/settings.example.py b/settings.example.py index <HASH>..<HASH> 100644 --- a/settings.example.py +++ b/settings.example.py @@ -61,6 +61,6 @@ MQTT_BROKER = "10.0.0.1" # No extensions will be loaded per default # EXTENSIONS = [] # EXTENSIONS = [ -# b"org.homie.legacy-firmware:0.1.1:[4.x]", -# b"org.homie.legacy-stats:0.1.1:[4.x]", +# "org.homie.legacy-firmware:0.1.1:[4.x]", +# "org.homie.legacy-stats:0.1.1:[4.x]", # ]
Remove bytestring for extension in settings example
py
diff --git a/python/dllib/src/bigdl/dllib/utils/common.py b/python/dllib/src/bigdl/dllib/utils/common.py index <HASH>..<HASH> 100644 --- a/python/dllib/src/bigdl/dllib/utils/common.py +++ b/python/dllib/src/bigdl/dllib/utils/common.py @@ -581,6 +581,7 @@ def _get_gateway(): def callBigDlFunc(bigdl_type, name, *args): """ Call API in PythonBigDL """ gateway = _get_gateway() + args = [_py2java(gateway, a) for a in args] error = Exception("Cannot find function: %s" % name) for jinvoker in JavaCreator.instance(bigdl_type, gateway).value: # hasattr(jinvoker, name) always return true here, @@ -635,7 +636,6 @@ def _java2py(gateway, r, encoding="bytes"): def callJavaFunc(func, *args): """ Call Java Function """ gateway = _get_gateway() - args = [_py2java(gateway, a) for a in args] result = func(*args) return _java2py(gateway, result)
fix callBigDLFunc (#<I>)
py
diff --git a/examples/webcam/webcam.py b/examples/webcam/webcam.py index <HASH>..<HASH> 100644 --- a/examples/webcam/webcam.py +++ b/examples/webcam/webcam.py @@ -41,7 +41,7 @@ async def offer(request): pcs.discard(pc) # open webcam - options = {'video_size': '640x480'} + options = {'framerate': '30', 'video_size': '640x480'} if platform.system() == 'Darwin': player = MediaPlayer('default:none', format='avfoundation', options=options) else:
[examples] explicitly set frame rate to <I>fps. This fixes #<I>
py
diff --git a/src/canmatrix/canmatrix.py b/src/canmatrix/canmatrix.py index <HASH>..<HASH> 100644 --- a/src/canmatrix/canmatrix.py +++ b/src/canmatrix/canmatrix.py @@ -171,10 +171,11 @@ class Signal(object): def multiplexSetter(self, value): self.mux_val = None self.is_multiplexer = False + ret_multiplex = None if value is not None and value != 'Multiplexor': ret_multiplex = int(value) self.mux_val = int(value) - else: # is it valid for None too? + elif value == 'Multiplexor': self.is_multiplexer = True ret_multiplex = value return ret_multiplex
Fix signal is_multiplexer being True by default. The default should be False. The error was probably introduced by always applying the multiplexSetter in __attrs_post_init__ in commit a<I>.
py
diff --git a/pyvisa-py/usb.py b/pyvisa-py/usb.py index <HASH>..<HASH> 100644 --- a/pyvisa-py/usb.py +++ b/pyvisa-py/usb.py @@ -229,7 +229,8 @@ class USBInstrSession(USBSession): for name in ('SEND_END_EN', 'TERMCHAR', 'TERMCHAR_EN', 'TMO_VALUE'): attribute = getattr(constants, 'VI_ATTR_' + name) - self.attrs[attribute] = attributes.AttributesByID[attribute].default + self.set_attribute(attribute, + attributes.AttributesByID[attribute].default) @Session.register(constants.InterfaceType.usb, 'RAW') @@ -276,4 +277,5 @@ class USBRawSession(USBSession): for name in ('SEND_END_EN', 'TERMCHAR', 'TERMCHAR_EN', 'TMO_VALUE'): attribute = getattr(constants, 'VI_ATTR_' + name) - self.attrs[attribute] = attributes.AttributesByID[attribute].default + self.set_attribute(attribute, + attributes.AttributesByID[attribute].default)
Fix custom timeout for USBTMC sessions USBInstrSession contained code that directly manipulated self.attrs[], thus accidentally disabling any getters/setters (including the getter/setter for USB timeout). This is fixed by calling Session.set_attribute() instead of directly manipulating self.attrs[].
py
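The general pitfall fixed above: writing a backing store directly skips whatever side effects the setter performs. A generic illustration (not pyvisa-py's actual API):

    class Session:
        def __init__(self):
            self.attrs = {}
            self.timeout = None

        def set_attribute(self, key, value):
            self.attrs[key] = value
            if key == 'TMO_VALUE':
                self.timeout = value / 1000.0  # side effect a raw write skips

    s = Session()
    s.attrs['TMO_VALUE'] = 2000       # bypasses the setter: timeout stays None
    print(s.timeout)                  # None
    s.set_attribute('TMO_VALUE', 2000)
    print(s.timeout)                  # 2.0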
diff --git a/pymc/StepMethods.py b/pymc/StepMethods.py index <HASH>..<HASH> 100644 --- a/pymc/StepMethods.py +++ b/pymc/StepMethods.py @@ -651,9 +651,18 @@ class DrawFromPrior(StepMethod): self.generations = generations def step(self): + jumped = [] for generation in self.generations: - for s in generation: - s.rand() + try: + for s in generation: + jumped.append(s) + s.rand() + s.logp + except ZeroProbability: + warnings.warn('Stochastic %s: random method returned value with logp=0, rejecting.'%s.__name__) + for s in jumped: + s.revert() + break @classmethod def competence(s):
Protecting against illegal jumps in DrawFromPrior
py
diff --git a/gspreadsheet/tests.py b/gspreadsheet/tests.py index <HASH>..<HASH> 100644 --- a/gspreadsheet/tests.py +++ b/gspreadsheet/tests.py @@ -6,7 +6,8 @@ connect to a live spreadsheet whose contents cannot be guaranteed. So take the results of these tests with a grain of salt. """ -from unittest import TestCase, skip +from unittest import TestCase, skip, skipIf +import os from .gspreadsheet import GSpreadsheet, ReadOnlyException from .auth import Auth @@ -92,11 +93,17 @@ class Basics(TestCase): self.assertEqual(type(copy(row)), dict) self.assertEqual(type(row.copy()), dict) + +@skipIf('GOOGLE_ACCOUNT_EMAIL' not in os.environ or + 'GOOGLE_ACCOUNT_PASSWORD' not in os.environ, + 'These tests require being logged in') +class LoggedInTests(TestCase): def test_can_append_row(self): import datetime from . import __version__ as VERSION sheet = GSpreadsheet(WRITABLE_TEST_URL) - if sheet.is_authed: - sheet.append(dict(date=datetime.datetime.utcnow().isoformat(' ').split('.')[0], - value=str(VERSION))) + + self.assertTrue(sheet.is_authed) + sheet.append(dict(date=datetime.datetime.utcnow().isoformat(' ').split('.')[0], + value=str(VERSION)))
re-enable tests that required authentication
py
diff --git a/traffic/core/flight.py b/traffic/core/flight.py index <HASH>..<HASH> 100644 --- a/traffic/core/flight.py +++ b/traffic/core/flight.py @@ -1602,7 +1602,7 @@ class Flight( self, other: PointMixin, column_name: str = "bearing" ) -> "Flight": # temporary, should implement full stuff - size = len(self) + size = self.data.shape[0] return self.assign( **{ column_name: geo.bearing( @@ -1685,7 +1685,7 @@ class Flight( ) if isinstance(other, PointMixin): - size = len(self) + size = self.data.shape[0] return self.assign( **{ column_name: geo.distance(
ref #<I>, no need to resample before goaround
py
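Replacing len(self) with self.data.shape[0] suggests that Flight.__len__ does extra work (the message hints at resampling) rather than simply counting rows. A toy illustration of how the two can diverge, using a hypothetical wrapper class:

```python
import pandas as pd

class Wrapper:
    def __init__(self, data):
        self.data = data

    def __len__(self):
        # imagine a __len__ that post-processes, e.g. counts resampled points
        return 3

w = Wrapper(pd.DataFrame({'t': range(10)}))
print(len(w))            # 3  -- whatever __len__ computes
print(w.data.shape[0])   # 10 -- the raw number of rows
```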
diff --git a/pyoko/form.py b/pyoko/form.py index <HASH>..<HASH> 100644 --- a/pyoko/form.py +++ b/pyoko/form.py @@ -257,6 +257,7 @@ class Form(ModelForm): class Button(BaseField): def __init__(self, *args, **kwargs): self.cmd = kwargs.pop('cmd', None) + self.flow = kwargs.pop('flow', None) super(Button, self).__init__(*args, **kwargs) solr_type = 'button'
added "flow" kwarg argument to button field
py
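Popping custom keyword arguments before delegating to super() is the standard way to extend a constructor without breaking the parent's signature. A generic sketch of the pattern:

```python
class BaseField:
    def __init__(self, *args, **kwargs):
        self.args = args
        if kwargs:
            # the parent never has to know about subclass extras
            raise TypeError('unexpected kwargs: %r' % kwargs)

class Button(BaseField):
    def __init__(self, *args, **kwargs):
        # Remove our extras first so the parent never sees them.
        self.cmd = kwargs.pop('cmd', None)
        self.flow = kwargs.pop('flow', None)
        super(Button, self).__init__(*args, **kwargs)

b = Button('ok', cmd='save', flow='next_step')
print(b.cmd, b.flow)
```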
diff --git a/yotta/lib/access.py b/yotta/lib/access.py index <HASH>..<HASH> 100644 --- a/yotta/lib/access.py +++ b/yotta/lib/access.py @@ -216,7 +216,7 @@ def searchPathsForComponent(name, version_required, search_paths): def satisfyVersionFromAvailble(name, version_required, available): spec = None - if name in available: + if name in available and available[name]: logger.debug('satisfy %s from already installed components' % name) # we still need to check the version specification - which the remote # components know how to parse:
fix listing dependencies if the same module is missing twice
py
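The added `and available[name]` guards against a key that exists but maps to an empty, falsy collection:

```python
available = {'modA': [], 'modB': ['1.0.0']}

for name in ('modA', 'modB', 'modC'):
    if name in available and available[name]:
        print(name, 'can be satisfied from', available[name])
    else:
        print(name, 'must be fetched')   # modA and modC both land here
```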
diff --git a/LiSE/LiSE/handle.py b/LiSE/LiSE/handle.py index <HASH>..<HASH> 100644 --- a/LiSE/LiSE/handle.py +++ b/LiSE/LiSE/handle.py @@ -1246,4 +1246,10 @@ class EngineHandle(object): self._real.game_start() def is_parent_of(self, parent, child): - return self._real.is_parent_of(parent, child) \ No newline at end of file + return self._real.is_parent_of(parent, child) + + def apply_choice(self, entity, key, value, dry_run=False): + return self.engine.apply_choice(entity, key, value, dry_run) + + def apply_choices(self, choices, dry_run=False, perfectionist=False): + return self.engine.apply_choices(choices, dry_run, perfectionist) \ No newline at end of file
Add apply_choice and apply_choices to the EngineHandle
py
diff --git a/docs/source/conf.py b/docs/source/conf.py index <HASH>..<HASH> 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -68,7 +68,7 @@ version = anom.__version__ # The full version, including alpha/beta/rc tags. release = anom.__version__ -versions = ['latest', version, '0.2.0', '0.1.0', '0.0.7', '0.0.6', '0.0.5', '0.0.4'] +versions = ['latest', version, '0.3.0', '0.2.0', '0.1.0', '0.0.7', '0.0.6', '0.0.5', '0.0.4'] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.
chore: add <I> to doc versions list
py
diff --git a/iopipe/report.py b/iopipe/report.py index <HASH>..<HASH> 100644 --- a/iopipe/report.py +++ b/iopipe/report.py @@ -1,3 +1,4 @@ +import copy import json import logging import os @@ -191,4 +192,4 @@ class Report(object): logger.debug("Sending report to IOpipe:") logger.debug(json.dumps(self.report, indent=2, sort_keys=True)) - self.client.submit_future(send_report, self.report, self.config) + self.client.submit_future(send_report, copy.deepcopy(self.report), self.config)
Try making a copy of report before sending
py
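Deep-copying before handing the report to a background future freezes a snapshot, so later mutations by the caller cannot race with serialization. An illustrative sketch (names are hypothetical, not the IOpipe API):

```python
import copy
import time
from concurrent.futures import ThreadPoolExecutor

def send_report(report):
    time.sleep(0.1)            # simulate network latency
    print('sent:', report)

pool = ThreadPoolExecutor()
report = {'errors': []}

pool.submit(send_report, copy.deepcopy(report))  # snapshot is immune...
report['errors'].append('late mutation')          # ...to this change
pool.shutdown(wait=True)
```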
diff --git a/python/deepwater/models/test_inception.py b/python/deepwater/models/test_inception.py index <HASH>..<HASH> 100644 --- a/python/deepwater/models/test_inception.py +++ b/python/deepwater/models/test_inception.py @@ -20,15 +20,15 @@ class TestInceptionV4(unittest.TestCase): use_debug_session=False, ) - def test_inceptionv4_must_converge_on_CIFAR10(self): - CIFAR10_must_converge("inceptionv4", inception.InceptionV4, - optimizers.RMSPropOptimizer, - batch_size=16, - epochs=90, - initial_learning_rate=0.2, - summaries=False, - use_debug_session=False, - ) +# def test_inceptionv4_must_converge_on_CIFAR10(self): +# CIFAR10_must_converge("inceptionv4", inception.InceptionV4, +# optimizers.RMSPropOptimizer, +# batch_size=16, +# epochs=90, +# initial_learning_rate=0.2, +# summaries=False, +# use_debug_session=False, +# ) def test_inceptionv4_cat_dog_mouse_must_converge(self): train_error = cat_dog_mouse_must_converge("inceptionv4", inception.InceptionV4,
disable CIFAR<I> tests since they are very slow
py
diff --git a/tests/data_context/test_data_context.py b/tests/data_context/test_data_context.py index <HASH>..<HASH> 100644 --- a/tests/data_context/test_data_context.py +++ b/tests/data_context/test_data_context.py @@ -322,6 +322,10 @@ def test_normalize_data_asset_names_conditions_single_name(): # - "mydatasource/mygenerator/myotherasset" # "mydatasource/myasset/mypurpose" -> "mydatasource/mygenerator/myasset/mypurpose" + # tables vs queries + # df = context.get_batch("moviedb/tables/ratings") + # df = context.get_batch("moviedb/queries/mynewquery", query="select * from ratings limit 100") + def test_list_datasources(data_context): datasources = data_context.list_datasources()
Added a note for a new test case
py
diff --git a/indra/tests/test_rest_api.py b/indra/tests/test_rest_api.py index <HASH>..<HASH> 100644 --- a/indra/tests/test_rest_api.py +++ b/indra/tests/test_rest_api.py @@ -174,3 +174,10 @@ def test_assemblers_graph(): res = _call_api('post', 'assemblers/graph', stmt_str) res_json = res.json() assert 'model' in res_json.keys() + + +def test_assemblers_english(): + stmt_str = json.dumps({'statements': [STMT_JSON]}) + res = _call_api('post', 'assemblers/english', stmt_str) + res_json = res.json() + assert 'sentences' in res_json.keys()
Add test of english assembler.
py
diff --git a/docs/conf.py b/docs/conf.py index <HASH>..<HASH> 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -57,6 +57,10 @@ sphinx_gallery_confg = { } } +import matplotlib as mpl +mpl.use('Agg') +import matplotlib.pyplot as plt + from glob import glob autosummary_generate = glob('*.rst')
trying to circumvent plt errors in sphinx build
py
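Selecting the non-interactive Agg backend before the first pyplot import is the usual fix for headless build machines; matplotlib fixes its backend at import time, so the ordering is the whole trick:

```python
import matplotlib as mpl
mpl.use('Agg')                 # must happen before pyplot is imported
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
fig.savefig('example.png')     # works with no display attached
```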
diff --git a/superset/sql_lab.py b/superset/sql_lab.py index <HASH>..<HASH> 100644 --- a/superset/sql_lab.py +++ b/superset/sql_lab.py @@ -220,6 +220,7 @@ def execute_sql_statement(sql_statement, query, user_name, session, cursor): security_manager, ) query.executed_sql = sql + session.commit() with stats_timing("sqllab.query.time_executing_query", stats_logger): logging.info(f"Query {query_id}: Running query: \n{sql}") db_engine_spec.execute(cursor, sql, async_=True)
Add commit to attempt to resolve query table lock (#<I>)
py
diff --git a/angr/engines/vex/claripy/irop.py b/angr/engines/vex/claripy/irop.py index <HASH>..<HASH> 100644 --- a/angr/engines/vex/claripy/irop.py +++ b/angr/engines/vex/claripy/irop.py @@ -869,7 +869,7 @@ class SimIROp: rm = self._translate_rm(args[0] if rm_exists else claripy.BVV(0, 32)) arg = args[1 if rm_exists else 0] - return arg.val_to_fp(claripy.fp.FSort.from_size(self._output_size_bits), signed=True, rm=rm) + return arg.val_to_fp(claripy.fp.FSort.from_size(self._output_size_bits), signed=self._from_signed != 'U', rm=rm) def _op_fp_to_fp(self, args): rm_exists = self._from_size != 32 or self._to_size != 64
Add unsigned floating point conversions (#<I>)
py
diff --git a/tests/tasks.py b/tests/tasks.py index <HASH>..<HASH> 100644 --- a/tests/tasks.py +++ b/tests/tasks.py @@ -16,7 +16,6 @@ from kuyruk.exceptions import Discard # Override defaults for testing Config.WORKER_LOGGING_LEVEL = "debug" -Config.WORKER_MAX_LOAD = 999 kuyruk = Kuyruk()
no need to set max_load to a high value
py
diff --git a/djangular/styling/bootstrap3/widgets.py b/djangular/styling/bootstrap3/widgets.py index <HASH>..<HASH> 100644 --- a/djangular/styling/bootstrap3/widgets.py +++ b/djangular/styling/bootstrap3/widgets.py @@ -27,7 +27,7 @@ class ChoiceFieldRenderer(DjngChoiceFieldRenderer): class CheckboxInput(widgets.CheckboxInput): def __init__(self, label, attrs=None, check_test=None): # the label is rendered by the Widget class rather than by BoundField.label_tag() - self.choice_label = force_text(label) + self.choice_label = label super(CheckboxInput, self).__init__(attrs, check_test) def render(self, name, value, attrs=None):
Fixed: choice_label in CheckboxInput was not handled by i<I>n
py
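force_text() resolves Django's lazy translation proxies immediately, freezing the label to whatever language is active at construction time; keeping the proxy defers resolution until render. A sketch of the difference with a stand-in lazy object (not Django's actual machinery):

```python
class LazyLabel:
    """Stand-in for a lazy i18n proxy: resolves only when str() is called."""
    def __init__(self):
        self.language = 'en'

    def __str__(self):
        return {'en': 'Accept', 'de': 'Akzeptieren'}[self.language]

label = LazyLabel()

eager = str(label)      # what force_text() did: frozen to 'Accept'
label.language = 'de'   # language switches before render
print(eager)            # Accept      -- wrong language
print(str(label))       # Akzeptieren -- resolved at render time
```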
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ def read(filename): setup( name = "ccsyspath", - version = "1.0.2", + version = "1.1.0", description = "Find the system include paths for clang and gcc C/C++ compilers", long_description = read('README.rst'), author = "Andrew Walker",
version bump for pypi release
py
diff --git a/markupsafe/tests.py b/markupsafe/tests.py index <HASH>..<HASH> 100644 --- a/markupsafe/tests.py +++ b/markupsafe/tests.py @@ -158,9 +158,9 @@ class MarkupLeakTestCase(unittest.TestCase): escape("<foo>") escape(u"foo") escape(u"<foo>") - gc.collect() + if hasattr(sys, 'pypy_version_info'): gc.collect() counts.add(len(gc.get_objects())) - assert len(counts) == 1, 'ouch, c extension seems to leak objects' + assert len(counts) == 1, 'ouch, c extension seems to leak objects, got: ' + str(len(counts)) def suite():
Made gc.collect() call conditional on running on PyPy.
py
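Detecting PyPy via sys.pypy_version_info is the conventional interpreter check; CPython's reference counting frees objects promptly, while PyPy needs an explicit collection before object counts are comparable:

```python
import gc
import sys

IS_PYPY = hasattr(sys, 'pypy_version_info')

def maybe_collect():
    # On CPython this is unnecessary and only slows the test down.
    if IS_PYPY:
        gc.collect()

maybe_collect()
```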
diff --git a/gala/potential/scf/core.py b/gala/potential/scf/core.py index <HASH>..<HASH> 100644 --- a/gala/potential/scf/core.py +++ b/gala/potential/scf/core.py @@ -213,6 +213,6 @@ def compute_coeffs_discrete(xyz, mass, nmax, lmax, r_s, if compute_var: Snlm_var[n,l,m], Tnlm_var[n,l,m], STnlm_var[n,l,m] = STnlm_var_discrete(s, phi, X, mass, n, l, m) if compute_var: - return (Snlm,Snlm_var), (Tnlm,Tnlm_var), (STnlm_var) + return Snlm, Tnlm, np.array([[Snlm_var, STnlm_var], [STnlm_var, Tnlm_var]]) else: return Snlm, Tnlm
return covariance matrix between SCF coefficients
py
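Packing the two variances and the cross term into a symmetric 2x2 block is what lets callers treat the result as a per-coefficient covariance matrix. A shape check with illustrative values:

```python
import numpy as np

Snlm_var = np.full((2, 2, 2), 0.5)   # var(S) per (n, l, m)
Tnlm_var = np.full((2, 2, 2), 0.3)   # var(T)
STnlm_var = np.full((2, 2, 2), 0.1)  # cov(S, T)

cov = np.array([[Snlm_var, STnlm_var],
                [STnlm_var, Tnlm_var]])
print(cov.shape)  # (2, 2, 2, 2, 2): a 2x2 covariance block per coefficient
print(cov[0, 1, 0, 0, 0] == cov[1, 0, 0, 0, 0])  # symmetric: True
```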
diff --git a/AegeanTools/fits_image.py b/AegeanTools/fits_image.py index <HASH>..<HASH> 100644 --- a/AegeanTools/fits_image.py +++ b/AegeanTools/fits_image.py @@ -12,7 +12,7 @@ import logging,sys from math import pi,cos,sin,sqrt class FitsImage(): - def __init__(self, filename, hdu_index=0, hdu=None, beam=None): + def __init__(self, filename=None, hdu_index=0, hdu=None, beam=None): """ filename: the name of the fits image file hdu_index = index of FITS HDU when extensions are used (0 is primary HDU) @@ -108,8 +108,6 @@ class FitsImage(): assert pixels.shape == self._pixels.shape, "Shape mismatch between pixels supplied {0} and existing image pixels {1}".format(pixels.shape,self._pixels.shape) self._pixels = pixels - - def get_background_rms(self): ''' Return the background RMS (Jy)
It is no longer necessary to supply fitsfilename when a FitsImage is being created from an hdu
py
diff --git a/pyt/vulnerabilities.py b/pyt/vulnerabilities.py index <HASH>..<HASH> 100644 --- a/pyt/vulnerabilities.py +++ b/pyt/vulnerabilities.py @@ -96,10 +96,12 @@ def is_sanitized(sink, sanitiser_dict): def get_vulnerability(source, sink, triggers): if source.cfg_node in sink.cfg_node.new_constraint: + source_trigger_word = source.trigger_word_tuple.trigger_word + sink_trigger_word = sink.trigger_word_tuple.trigger_word if not is_sanitized(sink, triggers.sanitiser_dict): - source_trigger_word = source.trigger_word_tuple.trigger_word - sink_trigger_word = sink.trigger_word_tuple.trigger_word return Vulnerability(source.cfg_node, source_trigger_word, sink.cfg_node, sink_trigger_word) + elif is_sanitized(sink, triggers.sanitiser_dict): + return SanitisedVulnerability(source.cfg_node, source_trigger_word, sink.cfg_node, sink_trigger_word, sink.trigger_word_tuple.sanitisers) return None def find_vulnerabilities(cfg_list, trigger_word_file=default_trigger_word_file):
Adding sanitised vulnerabilities
py
diff --git a/h11/_events.py b/h11/_events.py index <HASH>..<HASH> 100644 --- a/h11/_events.py +++ b/h11/_events.py @@ -70,6 +70,12 @@ class _EventBundle(object): return (self.__class__ == other.__class__ and self.__dict__ == other.__dict__) + def __ne__(self, other): + return not self.__eq__(other) + + # This is an unhashable type. + __hash__ = None + class Request(_EventBundle): """The beginning of an HTTP request.
Explicit equality and hashing on _EventBundle. These are both required for Python 2.
py
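Python 2 derives neither __ne__ nor a disabled __hash__ from a custom __eq__ (Python 3 handles both automatically), so both must be spelled out. The idiom in isolation:

```python
class Bundle(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):   # Python 2 will not infer this from __eq__
        return not self.__eq__(other)

    __hash__ = None            # mutable, so explicitly unhashable

a, b = Bundle(x=1), Bundle(x=2)
print(a != b)                  # True, via the explicit __ne__
try:
    hash(a)
except TypeError as e:
    print('unhashable:', e)
```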
diff --git a/analyzers/VMRay/vmray.py b/analyzers/VMRay/vmray.py index <HASH>..<HASH> 100755 --- a/analyzers/VMRay/vmray.py +++ b/analyzers/VMRay/vmray.py @@ -89,7 +89,7 @@ class VMRayAnalyzer(Analyzer): else: level = "info" - if r["reports"] > 1: + if len(r["reports"]) > 1: value = "{}( from scan {})".format(s["score"], i) else: value = "{}".format(s["score"])
VMRay: fix comparison of a list with an int; compare the list's length instead
py
diff --git a/jumeaux/executor.py b/jumeaux/executor.py index <HASH>..<HASH> 100644 --- a/jumeaux/executor.py +++ b/jumeaux/executor.py @@ -488,6 +488,14 @@ Please specify a valid name. logger_config.update({'disable_existing_loggers': False}) logging.config.dictConfig(logger_config) + logger.info(f"""[Config (from yaml files or report and args)] +---- + +{config.to_yaml()} + +---- +""") + # Requests logs: TList[Request] = global_addon_executor.apply_reqs2reqs( Reqs2ReqsAddOnPayload.from_dict({'requests': origin_logs}),
:hammer: Add config settings to log
py
diff --git a/astropy_helpers/commands/_test_compat.py b/astropy_helpers/commands/_test_compat.py index <HASH>..<HASH> 100644 --- a/astropy_helpers/commands/_test_compat.py +++ b/astropy_helpers/commands/_test_compat.py @@ -198,8 +198,13 @@ class AstropyTest(Command, object): build_cmd = self.get_finalized_command('build') new_path = os.path.abspath(build_cmd.build_lib) - self.tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-', - dir=self.temp_root) + # On OSX the default path for temp files is under /var, but in most + # cases on OSX /var is actually a symlink to /private/var; ensure we + # dereference that link, because py.test is very sensitive to relative + # paths... + tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-', + dir=self.temp_root) + self.tmp_dir = os.path.realpath(tmp_dir) self.testing_path = os.path.join(self.tmp_dir, os.path.basename(new_path)) shutil.copytree(new_path, self.testing_path)
This is a follow-up to #<I>, needed for things to work properly on OSX, where tempfile.mkdtemp does *not* return a real path.
py
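Calling os.path.realpath() once at creation time keeps every later comparison of the temp path consistent, since on OSX /var is a symlink to /private/var. The core idiom:

```python
import os
import tempfile

tmp_dir = tempfile.mkdtemp(prefix='pkg-test-')
# e.g. /var/folders/... -> /private/var/folders/... on macOS
tmp_dir = os.path.realpath(tmp_dir)
print(tmp_dir)
```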