diff (stringlengths: 139-3.65k)
message (stringlengths: 8-627)
diff_languages (stringclasses: 1 value)
diff --git a/scanpy/plotting.py b/scanpy/plotting.py index <HASH>..<HASH> 100755 --- a/scanpy/plotting.py +++ b/scanpy/plotting.py @@ -73,7 +73,7 @@ def plot_tool(dplot, adata, if smp in adata.smp_keys(): if adata.smp[smp].dtype.char in ['S', 'U']: categorical = True - elif np.unique(adata.smp).size < 20: + elif np.unique(adata.smp).size < 13: categorical = True else: c = pl.cm.get_cmap(params['cmap'])(
threshold for categorical plotting at <I> categories
py
diff --git a/treeplacing/models.py b/treeplacing/models.py index <HASH>..<HASH> 100644 --- a/treeplacing/models.py +++ b/treeplacing/models.py @@ -69,7 +69,9 @@ class Node(object): def search(self, variants): assert self.children[0].parent is not None assert self.children[1].parent is not None - overlap = (len(set(self.children[0].phylo_snps) & set(variants)), len(set(self.children[1].phylo_snps) & set(variants) )) + overlap = [] + overlap = (float(len(set(self.children[0].phylo_snps) & set(variants)) )/ len(set(self.children[0].phylo_snps) | set(variants) ), + float(len(set(self.children[1].phylo_snps) & set(variants))) / len( set(self.children[1].phylo_snps) | set(variants) ) ) print overlap, self.children[0], self.children[1] # print overlap, len(variants), len(self.children[0].phylo_snps) if overlap[0] > overlap[1]:
Use the shared fraction (intersection over union) instead of the raw intersection count for unbalanced choice
py
diff --git a/LiSE/LiSE/character.py b/LiSE/LiSE/character.py index <HASH>..<HASH> 100644 --- a/LiSE/LiSE/character.py +++ b/LiSE/LiSE/character.py @@ -127,9 +127,12 @@ class AbstractCharacter(MutableMapping): if name not in self.node: self.add_thing(name, location, **kwargs) return self.thing[name] - n = 0 - while name + str(n) in self.node: - n += 1 + if isinstance(name, str): + n = 0 + while name + str(n) in self.node: + n += 1 + elif name in self.node: + raise KeyError("Already have a thing named {}".format(name)) self.add_thing(name + str(n), location, **kwargs) return self.thing[name]
Don't try to autoincrement thing names if the name is not a string
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -868,8 +868,8 @@ class SaltDistribution(distutils.dist.Distribution): 'Salt\'s pre-configured SPM formulas directory'), ('salt-spm-pillar-dir=', None, 'Salt\'s pre-configured SPM pillar directory'), - ('salt-spm-reactor-dir=', None, - 'Salt\'s pre-configured SPM reactor directory'), + ('salt-home-dir=', None, + 'Salt\'s pre-configured user home directory'), ] def __init__(self, attrs=None):
Adding home_dir to SaltDistribution.global_options.
py
diff --git a/glances/plugins/glances_batpercent.py b/glances/plugins/glances_batpercent.py index <HASH>..<HASH> 100644 --- a/glances/plugins/glances_batpercent.py +++ b/glances/plugins/glances_batpercent.py @@ -101,11 +101,8 @@ class glancesGrabBat: Update the stats """ if self.initok: - try: - self.bat.update() - except Exception: - self.bat_list = [] - else: + reply = self.bat.update() + if reply is not None: self.bat_list = [] new_item = {'label': _("Battery (%)"), 'value': self.getcapacitypercent()}
Fix bug when batinfo library is installed but no battery is available on the system
py
diff --git a/doc/conf.py b/doc/conf.py index <HASH>..<HASH> 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -92,7 +92,7 @@ pygments_style = 'sphinx' # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'default' +html_theme = 'classic' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the
Actually, this change is needed just to be able to verify the documentation is OK with Sphinx <I>.
py
diff --git a/pmagpy/new_builder.py b/pmagpy/new_builder.py index <HASH>..<HASH> 100644 --- a/pmagpy/new_builder.py +++ b/pmagpy/new_builder.py @@ -1200,6 +1200,12 @@ class MagicDataFrame(object): if name not in ['measurement', 'age']: self.df[name] = self.df.index elif name == 'measurement' and len(self.df): + if 'number' in self.df.columns: + self.df.rename(columns={'number':'treat_step_num'}, inplace=True) + if 'treat_step_num' not in self.df.columns: + print("-W- You are missing the 'treat_step_num' column in your measurements file") + print(" This may cause strange behavior in the analysis GUIs") + self.df['treat_step_num'] = '' self.df['measurement'] = self.df['experiment'] + self.df['treat_step_num'].astype(str)
MagicDataFrame: deal with number/treat_step_num in both places where self.df['measurement'] is set
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -5,10 +5,11 @@ from setuptools import setup setup(name='FoxPuppet', - version='0.1.0', + use_scm_version=True, description='Firefox user interface testing model for use with Selenium', long_description=open('README.rst').read(), url='https://github.com/mozilla/FoxPuppet', license='MPL2', packages=['foxpuppet'], - install_requires=['selenium>=3.0.1']) + install_requires=['selenium>=3.0.1'], + setup_requires=['setuptools_scm'])
Switch to setuptools_scm for managing versions
py
diff --git a/ai/backend/client/kernel.py b/ai/backend/client/kernel.py index <HASH>..<HASH> 100644 --- a/ai/backend/client/kernel.py +++ b/ai/backend/client/kernel.py @@ -28,7 +28,8 @@ class BaseKernel(BaseFunction): envs: Optional[Mapping[str, str]]=None, max_mem: int=0, exec_timeout: int=0) -> str: if client_token: - assert len(client_token) > 8 + assert 4 <= len(client_token) <= 64, \ + 'Client session token should be 4 to 64 characters long.' else: client_token = uuid.uuid4().hex resp = yield Request('POST', '/kernel/create', {
Fix the client-side validation of client token length.
py
diff --git a/aioinflux/__init__.py b/aioinflux/__init__.py index <HASH>..<HASH> 100644 --- a/aioinflux/__init__.py +++ b/aioinflux/__init__.py @@ -11,6 +11,6 @@ except ModuleNotFoundError: warnings.warn(no_pandas_warning) from .client import InfluxDBClient, InfluxDBError, logger -from .iterutils import iterpoints +from .iterutils import iterpoints, InfluxDBResult, InfluxDBChunkedResult __version__ = '0.3.0'
Expose InfluxDBResult and InfluxDBChunkedResult
py
diff --git a/utils/gh2k.py b/utils/gh2k.py index <HASH>..<HASH> 100755 --- a/utils/gh2k.py +++ b/utils/gh2k.py @@ -254,7 +254,8 @@ Thank you very much, def publish_twitter(twitter_contact, owner): """ Publish in twitter the dashboard """ dashboard_url = CAULDRON_DASH_URL + "/%s" % (owner) - tweet = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon #opendevmetrics" % (twitter_contact, owner, dashboard_url) + # tweet = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon #opendevmetrics" % (twitter_contact, owner, dashboard_url) + tweet = "@%s your http://cauldron.io dashboard for #%s at GitHub is ready: %s. Check it out! #oscon" % (twitter_contact, owner, dashboard_url) status = quote_plus(tweet) oauth = get_oauth() r = requests.post(url="https://api.twitter.com/1.1/statuses/update.json?status="+status, auth=oauth)
[gh2k] Remove #opendevmetrics to avoid using more than <I> chars in a tweet.
py
diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py index <HASH>..<HASH> 100644 --- a/tools/interop_matrix/client_matrix.py +++ b/tools/interop_matrix/client_matrix.py @@ -101,7 +101,7 @@ LANG_RELEASE_MATRIX = { 'v1.7.4': None }, { - 'v1.8.1': None + 'v1.8.2': None }, ], 'java': [
Add go release versions to client_matrix.py
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ setup( 'Topic :: Software Development :: Testing' ], packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), - download_url='https://github.com/avihad/twistes/tarball/{version}'.format(version=main_ns['__version__']), + download_url='https://github.com/avihad/twistes/tarball/v{version}'.format(version=main_ns['__version__']), zip_safe=False, include_package_data=True, install_requires=install_requires,
Update github source link in pypi setup file
py
diff --git a/publ/rendering.py b/publ/rendering.py index <HASH>..<HASH> 100644 --- a/publ/rendering.py +++ b/publ/rendering.py @@ -290,10 +290,10 @@ def render_entry(entry_id, slug_text='', category=''): raise http_error.NotFound("No such entry") # Show an access denied error if the entry has been set to draft mode - if record.status == model.PublishStatus.DRAFT: + if record.status == model.PublishStatus.DRAFT.value: raise http_error.Forbidden("Entry not available") # Show a gone error if the entry has been deleted - if record.status == model.PublishStatus.GONE: + if record.status == model.PublishStatus.GONE.value: raise http_error.Gone() # read the entry from disk
Fix PonyORM-related DRAFT/GONE regression
py
diff --git a/jsonschema/tests/test_validators.py b/jsonschema/tests/test_validators.py index <HASH>..<HASH> 100644 --- a/jsonschema/tests/test_validators.py +++ b/jsonschema/tests/test_validators.py @@ -100,14 +100,6 @@ class TestLegacyTypeCheckCreation(TestCase): self.smelly = mock.MagicMock() self.validators = {u"smelly": self.smelly} - def test_empty_dict_is_default(self): - definitions = validators._generate_legacy_type_checks() - self.assertEqual(definitions, {}) - - def test_functions_are_created(self): - definitions = validators._generate_legacy_type_checks({"object": dict}) - self.assertTrue(callable(definitions["object"])) - def test_default_types_used_if_no_type_checker_given(self): Validator = validators.create( meta_schema=self.meta_schema,
This is private, no need to unit test it directly.
py
diff --git a/src/psd_tools/decoder/tagged_blocks.py b/src/psd_tools/decoder/tagged_blocks.py index <HASH>..<HASH> 100644 --- a/src/psd_tools/decoder/tagged_blocks.py +++ b/src/psd_tools/decoder/tagged_blocks.py @@ -136,13 +136,13 @@ def _decode_type_tool_object_setting(data): # This decoder needs to be updated if we have new formats. if ver != 1 or txt_ver != 50 or desc_ver1 != 16: warnings.warn("Ignoring type setting tagged block due to old versions") - return + return data try: text_data = decode_descriptor(None, fp) except UnknownOSType as e: warnings.warn("Ignoring type setting tagged block (%s)" % e) - return + return data # XXX: Until Engine Data is parsed properly, the following cannot be parsed. # The end of the engine data dictates where this starts.
preserve raw data if descriptor can't be parsed
py
diff --git a/custodia/server/__init__.py b/custodia/server/__init__.py index <HASH>..<HASH> 100644 --- a/custodia/server/__init__.py +++ b/custodia/server/__init__.py @@ -26,16 +26,16 @@ logger = logging.getLogger('custodia') CONFIG_SPECIALS = ['authenticators', 'authorizers', 'consumers', 'stores'] -argparser = argparse.ArgumentParser( +default_argparser = argparse.ArgumentParser( prog='custodia', description='Custodia server' ) -argparser.add_argument( +default_argparser.add_argument( '--debug', action='store_true', help='Debug mode' ) -argparser.add_argument( +default_argparser.add_argument( 'configfile', nargs='?', type=argparse.FileType('r'), @@ -191,7 +191,9 @@ def parse_config(args): return config -def main(): +def main(argparser=None): + if argparser is None: + argparser = default_argparser args = argparser.parse_args() config = parse_config(args) log.setup_logging(config['debug'], config['auditlog'])
Provide an API to run the server with a custom argparser. custodia.server.main() now accepts an optional argparser to replace the default argparser with a custom argparser instance. This allows easy customization of the server, e.g. for a different default config file, prog name, etc. I plan to use this feature in FreeIPA to provide an alternative script to run FreeIPA's Custodia instance. The instance needs a different default config file and SELinux context.
py
diff --git a/raiden/tests/integration/test_stress.py b/raiden/tests/integration/test_stress.py index <HASH>..<HASH> 100644 --- a/raiden/tests/integration/test_stress.py +++ b/raiden/tests/integration/test_stress.py @@ -296,6 +296,7 @@ def assert_channels(raiden_network, token_network_identifier, deposit): @pytest.mark.parametrize('deposit', [5]) @pytest.mark.parametrize('reveal_timeout', [15]) @pytest.mark.parametrize('settle_timeout', [120]) [email protected](reason='Is flaky. Issue: #2492') def test_stress( raiden_network, deposit,
Skip stress test due to being flaky. Related to: <URL>
py
diff --git a/tests/integration-tests/virtual_environments.py b/tests/integration-tests/virtual_environments.py index <HASH>..<HASH> 100644 --- a/tests/integration-tests/virtual_environments.py +++ b/tests/integration-tests/virtual_environments.py @@ -55,7 +55,7 @@ def prepare_virtualenv(packages=()): vpython = os.path.join(vbin, 'python' + get_exe_suffix()) vpip = os.path.join(vbin, 'pip' + get_exe_suffix()) - vpip_install = [vpip, "install", "--force-reinstall"] + vpip_install = [vpython, "-m", "pip", "install", "--force-reinstall"] if (2, 5) <= sys.version_info < (2, 6): vpip_install.append("--insecure")
Fix tests: run pip from python, not directly
py
diff --git a/aioapp/amqp.py b/aioapp/amqp.py index <HASH>..<HASH> 100644 --- a/aioapp/amqp.py +++ b/aioapp/amqp.py @@ -92,8 +92,8 @@ class Channel: 'amqp:publish {} {}'.format(exchange_name, routing_key), CLIENT ) - context_span.tag(SPAN_TYPE, SPAN_TYPE_AMQP, True) - context_span.tag(SPAN_KIND, SPAN_KIND_AMQP_OUT, True) + span.tag(SPAN_TYPE, SPAN_TYPE_AMQP, True) + span.tag(SPAN_KIND, SPAN_KIND_AMQP_OUT, True) if propagate_trace: headers = context_span.make_headers() properties = properties or {}
fix tracer span tag for amqp
py
diff --git a/nba_py/shotchart.py b/nba_py/shotchart.py index <HASH>..<HASH> 100644 --- a/nba_py/shotchart.py +++ b/nba_py/shotchart.py @@ -45,7 +45,7 @@ class ShotChart: 'OpponentTeamID' : opponent_team_id, 'VsConference' : vs_conf, 'VsDivision' : vs_div, - 'Position' : position, + 'PlayerPosition' : position, 'GameSegment' : game_segment, 'Period' : period, 'LastNGames' : last_n_games,
Changes Position to PlayerPosition to appease the shotchart endpoint
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -30,6 +30,7 @@ setup( "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License",
Update python support.
py
diff --git a/testproject/tests/tests/test_restframework.py b/testproject/tests/tests/test_restframework.py index <HASH>..<HASH> 100644 --- a/testproject/tests/tests/test_restframework.py +++ b/testproject/tests/tests/test_restframework.py @@ -34,14 +34,14 @@ class MockModelSerializer(serializers.ModelSerializer): class Meta: model = models.MockModel - fields = '__all__' + fields = ('field',) class MockFileModelSerializer(serializers.ModelSerializer): class Meta: model = models.MockFileModel - fields = '__all__' + fields = ('field', 'file') class RetrieveUpdateAPIView(generics.RetrieveUpdateAPIView):
Fix tests to run with drf <I> & <I>.
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ REQUIRES = [] setup( name='stackinabox', version='0.1', - description='OpenStack/Rackspace Service Testing Suite', + description='RESTful API Testing Suite', license='Apache License 2.0', url='https://github.com/BenjamenMeyer/stackInABox', author='Benjamen R. Meyer',
Updated package description to match the README
py
diff --git a/bcbio/provenance/programs.py b/bcbio/provenance/programs.py index <HASH>..<HASH> 100644 --- a/bcbio/provenance/programs.py +++ b/bcbio/provenance/programs.py @@ -44,6 +44,7 @@ def _broad_versioner(type): elif type == "picard": return runner.get_picard_version("ViewSam") elif type == "mutect": + runner = broad.runner_from_config(config, "mutect") return jar_versioner("mutect", "muTect")(config) + runner.mutect_type() else: raise NotImplementedError(type)
bcbio/provenance/programs.py fixed for mutect version detection
py
diff --git a/command/build_ext.py b/command/build_ext.py index <HASH>..<HASH> 100644 --- a/command/build_ext.py +++ b/command/build_ext.py @@ -748,7 +748,7 @@ class build_ext(Command): if sysconfig.get_config_var('Py_ENABLE_SHARED'): pythonlib = 'python{}.{}{}'.format( sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff, - sys.abiflags) + sysconfig.get_config_var('ABIFLAGS')) return ext.libraries + [pythonlib] else: return ext.libraries
- Issue #<I>: Fix linking extension modules for cross builds. Patch by Xavier de Gaye.
py
diff --git a/chef/lib/chef/provider/package/yum-dump.py b/chef/lib/chef/provider/package/yum-dump.py index <HASH>..<HASH> 100644 --- a/chef/lib/chef/provider/package/yum-dump.py +++ b/chef/lib/chef/provider/package/yum-dump.py @@ -120,10 +120,17 @@ def dump_packages(yb, list): pkg.type = 'a' packages[str(pkg)] = pkg - # These are both installed and available - for pkg in db.reinstall_available: - pkg.type = 'r' - packages[str(pkg)] = pkg + if YUM_VER == 2: + # ugh - can't get the availability state of our installed rpms, lets assume + # they are available to install + for pkg in db.installed: + pkg.type = 'r' + packages[str(pkg)] = pkg + else: + # These are both installed and available + for pkg in db.reinstall_available: + pkg.type = 'r' + packages[str(pkg)] = pkg unique_packages = packages.values()
Lame fix for RHEL4 - mark all installed packages as available for reinstall.
py
diff --git a/aiohttp/multipart.py b/aiohttp/multipart.py index <HASH>..<HASH> 100644 --- a/aiohttp/multipart.py +++ b/aiohttp/multipart.py @@ -647,15 +647,22 @@ class MultipartReader: if chunk == b'': raise ValueError("Could not find starting boundary %r" % (self._boundary)) - if chunk.startswith(self._boundary): + newline = None + end_boundary = self._boundary + b'--' + if chunk.startswith(end_boundary): + _, newline = chunk.split(end_boundary, 1) + elif chunk.startswith(self._boundary): _, newline = chunk.split(self._boundary, 1) - assert newline in (b'\r\n', b'\n') + if newline is not None: + assert newline in (b'\r\n', b'\n'), (newline, + chunk, + self._boundary) self._newline = newline chunk = chunk.rstrip() if chunk == self._boundary: return - elif chunk == self._boundary + b'--': + elif chunk == end_boundary: self._at_eof = True return
Correctly handle newline detection not only for the open boundary but for the close mark too (#<I>)
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100755 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ setup( install_requires=['requests>=1.2.3', 'requests_oauthlib>=0.3.3', 'tlslite>=0.4.4'], - setup_requires=['sphinx', 'requests_oauthlib'], + setup_requires=['sphinx'], tests_require=['tlslite>=0.4.4','xmlrunner>=1.7.3', 'requests>=1.2.3'], extras_require={ 'magic': ['filemagic>=1.6'],
remove `requests_oauthlib` from `setup_requires`. I can't see why this is needed (install works without it), but if a conflicting (older) version of e.g. oauthlib is already installed, the install fails with pkg_resources.VersionConflict.
py
diff --git a/pysc2/env/sc2_env.py b/pysc2/env/sc2_env.py index <HASH>..<HASH> 100644 --- a/pysc2/env/sc2_env.py +++ b/pysc2/env/sc2_env.py @@ -626,6 +626,8 @@ class SC2Env(environment.Base): if (self._save_replay_episodes > 0 and self._episode_count % self._save_replay_episodes == 0): self.save_replay(self._replay_dir, self._replay_prefix) + if self._episode_steps >= 524000: + logging.info("Likely ended due to SC2's max step count of 2^19=524288.") logging.info(("Episode %s finished after %s game steps. " "Outcome: %s, reward: %s, score: %s"), self._episode_count, self._episode_steps, outcome, reward,
Note the reason for ending after 2^<I> steps. PiperOrigin-RevId: <I>
py
diff --git a/grlc.py b/grlc.py index <HASH>..<HASH> 100755 --- a/grlc.py +++ b/grlc.py @@ -2,9 +2,9 @@ #!/usr/bin/env python from flask import Flask, request, jsonify, render_template +import urllib import urllib2 import json -from SPARQLWrapper import SPARQLWrapper, JSON import StringIO import logging import re @@ -170,14 +170,18 @@ def query(user, repo, query): query = rewrite_query(raw_query, request.args) - - sparql = SPARQLWrapper(endpoint) - app.logger.debug("Sending query:\n" + query) - sparql.setQuery(query) - sparql.setReturnFormat(JSON) - results = sparql.query().convert() - - return jsonify(results) + # Preapre HTTP request + headers = { + 'Accept' : request.headers['Accept'] + } + data = { + 'query' : query + } + data_encoded = urllib.urlencode(data) + req = urllib2.Request(endpoint, data_encoded, headers) + response = urllib2.urlopen(req) + + return response.read() @app.route('/<user>/<repo>/api-docs') def api_docs(user, repo):
Bypassing accept headers, removing SPARQLWrapper
py
diff --git a/controller/api/models.py b/controller/api/models.py index <HASH>..<HASH> 100644 --- a/controller/api/models.py +++ b/controller/api/models.py @@ -381,7 +381,7 @@ class App(UuidAuditedModel): # HACK (bacongobbler): we need to wait until publisher has a chance to publish each # service to etcd, which can take up to 20 seconds. time.sleep(20) - for i in range(len(intervals)): + for i in xrange(len(intervals)): delay = int(config.get('HEALTHCHECK_INITIAL_DELAY', 0)) try: # sleep until the initial timeout is over
ref(controller): switch to xrange. `range` returns a copy of the list in memory at the first iteration, whereas `xrange` evaluates lazily, so only one value is present during runtime. Ergo, it is faster and more efficient.
py
diff --git a/examples/sampleserver.py b/examples/sampleserver.py index <HASH>..<HASH> 100644 --- a/examples/sampleserver.py +++ b/examples/sampleserver.py @@ -76,14 +76,16 @@ class ConcreteServer(OpenIDServer): def get_lifetime(self, req): return self.lifespan -## def get_user_setup_url(self, req): -## args = { -## 'identity': req.identity, -## 'trust_root': req.trust_root, -## 'return_to': req.return_to, -## 'success_to': req.return_to, -## } -## return append_args('http://localhost:8082/?action=allow', args) + def get_user_setup_url(self, req): + args = { + 'identity': req.identity, + 'trust_root': req.trust_root, + 'fail_to': append_args(req.return_to, {'openid.mode': 'cancel'}), + 'success_to': append_args(addr, req.args), + 'action':'allow', + } + url = append_args(addr, args) + return url def get_setup_response(self, req): args = {
[project @ reimplement get_user_setup_url]
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -67,6 +67,7 @@ setup( classifiers=[ 'Private :: Do Not Upload to pypi server', ], + packages=[], install_requires=[ 'pip', ],
fix: main setup.py packages should be given explicitly
py
diff --git a/msk/util.py b/msk/util.py index <HASH>..<HASH> 100644 --- a/msk/util.py +++ b/msk/util.py @@ -37,9 +37,8 @@ from msk import __version__ from msk.exceptions import PRModified, MskException, SkillNameTaken ASKPASS = '''#!/usr/bin/env python3 -import sys -print(r"""{token}""" -)''' +print(r"""{token}""") +''' skills_kit_footer = '<sub>Created with [mycroft-skills-kit]({}) v{}</sub>' \ .format('https://github.com/mycroftai/mycroft-skills-kit',
Simplify ask-pass script. sys isn't used; the script only needs to print the token to stdout for both user and password.
py
diff --git a/spacy/about.py b/spacy/about.py index <HASH>..<HASH> 100644 --- a/spacy/about.py +++ b/spacy/about.py @@ -3,7 +3,7 @@ # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py __title__ = 'spacy-nightly' -__version__ = '2.0.0a18' +__version__ = '2.0.0a19' __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' __uri__ = 'https://spacy.io' __author__ = 'Explosion AI'
Increment version to <I>a<I>
py
diff --git a/teslajsonpy/controller.py b/teslajsonpy/controller.py index <HASH>..<HASH> 100644 --- a/teslajsonpy/controller.py +++ b/teslajsonpy/controller.py @@ -149,7 +149,7 @@ async def wake_up(wrapped, instance, args, kwargs) -> Callable: is_wake_command = False is_energysite_command = False if wrapped.__name__ == "api": - car_id = kwargs.get("vehicle_id", "") + car_id = kwargs.get("path_vars", {}).get("vehicle_id", "") else: car_id = args[0] if not kwargs.get("vehicle_id") else kwargs.get("vehicle_id") is_wake_command = len(args) >= 2 and args[1] == "wake_up"
fix: fix extraction of car_id from api call
py
diff --git a/drench/peer.py b/drench/peer.py index <HASH>..<HASH> 100644 --- a/drench/peer.py +++ b/drench/peer.py @@ -249,6 +249,8 @@ class Peer(object): # Write out byte_index = piece_index * self.torrent.piece_length + self.piece = self.init_piece() + self.request_all() self.torrent.switchboard.write(byte_index, piece_bytes) self.torrent.switchboard.mark_off(piece_index) print self.torrent.switchboard.bitfield @@ -257,8 +259,8 @@ class Peer(object): self.reactor.is_running = False else: print "Bad data -- hash doesn't match. Discarding piece." - self.piece = self.init_piece() - self.request_all() + self.piece = self.init_piece() + self.request_all() def pcancel(self): print 'pcancel'
Stop waiting to write out before requesting next piece
py
diff --git a/mailqueue/models.py b/mailqueue/models.py index <HASH>..<HASH> 100644 --- a/mailqueue/models.py +++ b/mailqueue/models.py @@ -6,6 +6,7 @@ # #---------------------------------------------# import datetime +from django.utils.timezone import utc from django.db import models from django.core.mail import EmailMultiAlternatives @@ -31,7 +32,7 @@ class MailerMessage(models.Model): def send(self): if not self.sent: - self.last_attempt = datetime.datetime.now() + self.last_attempt = datetime.datetime.utcnow().replace(tzinfo=utc) try: subject, from_email, to = self.subject, self.from_address, self.to_address text_content = self.content
Fixed warning when timezone support is enabled.
py
diff --git a/gitlab/__init__.py b/gitlab/__init__.py index <HASH>..<HASH> 100644 --- a/gitlab/__init__.py +++ b/gitlab/__init__.py @@ -1720,7 +1720,7 @@ class Gitlab(object): else: return False - def createfile(self, project_id, file_path, branch_name, content, commit_message): + def createfile(self, project_id, file_path, branch_name, encoding, content, commit_message): """Creates a new file in the repository :param project_id: project id @@ -1730,7 +1730,7 @@ class Gitlab(object): :param commit_message: Commit message :return: true if success, false if not """ - data = {"file_path": file_path, "branch_name": branch_name, + data = {"file_path": file_path, "branch_name": branch_name, "encoding": encoding, "content": content, "commit_message": commit_message} request = requests.post("{0}/{1}/repository/files".format(self.projects_url, project_id), verify=self.verify_ssl, headers=self.headers, data=data)
Add a parameter 'encoding' to function createfile() to support uploading files that are plain text or base<I>-encoded
py
diff --git a/asana/client.py b/asana/client.py index <HASH>..<HASH> 100644 --- a/asana/client.py +++ b/asana/client.py @@ -1,19 +1,19 @@ +from types import ModuleType +import json +import platform +import time + +import requests + from . import session from . import resources from . import error from . import version from .page_iterator import CollectionPageIterator -from types import ModuleType -import requests -import json -import platform -import sys -import time - -if sys.version_info.major == 3: +try: import urllib.parse as urlparse -else: +except ImportError: import urllib as urlparse # Create a dict of resource classes
use try block for conditional import in order to support <I> and earlier
py
diff --git a/tensor2tensor/models/video/sv2p.py b/tensor2tensor/models/video/sv2p.py index <HASH>..<HASH> 100644 --- a/tensor2tensor/models/video/sv2p.py +++ b/tensor2tensor/models/video/sv2p.py @@ -132,10 +132,7 @@ class NextFrameSv2p(base.NextFrameBase, base_vae.NextFrameBaseVae): enc2, input_reward, "reward_enc") if latent is not None and not concat_latent: with tf.control_dependencies([latent]): - # This is the original SV2P implementation - # But we will tile and concat to support various latent sizes. - # enc2 = tf.concat([enc2, latent], axis=3) - enc2 = tile_and_concat(enc2, latent, concat_latent=concat_latent) + enc2 = tf.concat([enc2, latent], axis=3) enc3 = tfl.conv2d(enc2, hidden4.get_shape()[3], [1, 1], strides=(1, 1), padding="SAME", activation=tf.nn.relu, name="conv4")
Rolling back an SV2P modification which completely broke the model. PiperOrigin-RevId: <I>
py
diff --git a/cacheback/base.py b/cacheback/base.py index <HASH>..<HASH> 100644 --- a/cacheback/base.py +++ b/cacheback/base.py @@ -384,7 +384,8 @@ class Job(object): # ASYNC HELPER METHODS # -------------------- - def job_refresh(klass_str, obj_args, obj_kwargs, call_args, call_kwargs): + @classmethod + def job_refresh(cls, klass_str, obj_args, obj_kwargs, call_args, call_kwargs): """ Re-populate cache using the given job class.
Added missing classmethod decorator.
py
diff --git a/bin/build.py b/bin/build.py index <HASH>..<HASH> 100755 --- a/bin/build.py +++ b/bin/build.py @@ -16,6 +16,10 @@ def main(): for dirname in dirs: if not dirname.startswith('de'): continue + if not os.path.isdir(dirname): + os.mkdir(dirname) + with open(os.path.join(dirname, '__init__.py'), 'w') as f: + pass dirpath = os.path.join(topdir, dirname) filenames = os.listdir(dirpath) headername = [ n for n in filenames if n.startswith('header') ][0] @@ -131,9 +135,9 @@ def main(): ] for dataset in datasets for csi in range(cs) ]) print 'polynomials', planet + 1, a.shape - np.save('jpl-%02d' % (planet + 1), a) + np.save(os.path.join(dirname, 'jpl-%02d' % (planet + 1)), a) - np.save('constants', constants) + np.save(os.path.join(dirname, 'constants'), constants) if __name__ == '__main__': main()
Script build.py now saves numpy arrays to directories with names like "de<I>".
py
diff --git a/src/cobra/test/test_flux_analysis/test_variability.py b/src/cobra/test/test_flux_analysis/test_variability.py index <HASH>..<HASH> 100644 --- a/src/cobra/test/test_flux_analysis/test_variability.py +++ b/src/cobra/test/test_flux_analysis/test_variability.py @@ -6,7 +6,6 @@ from __future__ import absolute_import import numpy as np import pytest -from six import iteritems from cobra.exceptions import Infeasible from cobra.flux_analysis.variability import (
refactor: remove six usage in test_variability.py
py
diff --git a/SpiffWorkflow/serializer/dict.py b/SpiffWorkflow/serializer/dict.py index <HASH>..<HASH> 100644 --- a/SpiffWorkflow/serializer/dict.py +++ b/SpiffWorkflow/serializer/dict.py @@ -691,14 +691,15 @@ class DictionarySerializer(Serializer): # As we serialize back up, keep only one copy of any sub_workflow s_state['sub_workflows'] = {} for name, task in mylist: - if 'sub_workflows' in task: - s_state['sub_workflows'].update(task['sub_workflows']) - del task['sub_workflows'] if 'spec' in task: spec = json.loads(task['spec']) - s_state['sub_workflows'][spec['name']] = task['spec'] - del task['spec'] + if 'sub_workflows' in spec: + s_state['sub_workflows'].update(spec['sub_workflows']) + del spec['sub_workflows'] + if spec['name'] not in s_state['sub_workflows']: + s_state['sub_workflows'][spec['name']] = json.dumps(spec) task['spec_name'] = spec['name'] + del task['spec'] if hasattr(spec,'end'): s_state['end']=spec.end.id
Ensure we pull all the sub-workflow definitions to the top. Not adding a specific test for this, but save/restore tests this well across many workflows.
py
diff --git a/downhill/base.py b/downhill/base.py index <HASH>..<HASH> 100644 --- a/downhill/base.py +++ b/downhill/base.py @@ -8,7 +8,6 @@ import numpy as np import theano import theano.tensor as TT import warnings -import sys from . import util @@ -110,10 +109,7 @@ class Optimizer(util.Registrar(str('Base'), (), {})): name = 'unnamed{}'.format(unnamed) unnamed += 1 logging.warn('%s unnamed, will be "%s" internally', p, name) - d = '∂{}' - if getattr(sys.stdout, 'encoding', 'UTF-8') == 'UTF-8': - d = 'grad({})' - self._monitor_names.append(d.format(name)) + self._monitor_names.append('grad({})'.format(name)) self._monitor_exprs.append((g * g).sum()) def _compile(self):
Don't bother with utf-8 grad symbol. :-/
py
diff --git a/rinoh/style.py b/rinoh/style.py index <HASH>..<HASH> 100644 --- a/rinoh/style.py +++ b/rinoh/style.py @@ -95,7 +95,10 @@ class Style(dict): return copy def __getattr__(self, attribute): - return self[attribute] + if attribute in self._supported_attributes(): + return self[attribute] + else: + return super().__getattr__(attribute) def __getitem__(self, attribute): """Return the value of `attribute`.
Only forward *style* attribute to item lookups. This avoids trouble when Sphinx tries to access __getstate__ and __setstate__.
py
diff --git a/salt/utils/lazy.py b/salt/utils/lazy.py index <HASH>..<HASH> 100644 --- a/salt/utils/lazy.py +++ b/salt/utils/lazy.py @@ -98,8 +98,16 @@ class LazyDict(collections.MutableMapping): ''' Check if the name is in the dict and return it if it is ''' - if name in self._dict: - return self._dict[name] + if name not in self._dict and not self.loaded: + # load the item + if self._load(name): + log.debug('LazyLoaded {0}'.format(name)) + return self._dict[name] + else: + log.debug('Could not LazyLoad {0}'.format(key)) + raise KeyError(key) + elif name in self: + return self[name] raise AttributeError(name) def __len__(self):
Makes changes as suggested by Thomas so the loader does not break
py
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -29,6 +29,8 @@ setup( 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', ], license='BSD', )
Should support Python <I>/<I>
py
diff --git a/foolbox/attacks/ead.py b/foolbox/attacks/ead.py index <HASH>..<HASH> 100644 --- a/foolbox/attacks/ead.py +++ b/foolbox/attacks/ead.py @@ -177,7 +177,7 @@ class EADAttack(MinimizationAttack): break # stop optimization if there has been no progress loss_at_previous_check = loss.item() - found_advs_iter = is_adversarial(x_k, logits) + found_advs_iter = is_adversarial(x_k, model(x_k)) best_advs, best_advs_norms = _apply_decision_rule( self.decision_rule,
Fix bug when adversarial check is performed at y_k (#<I>)
py
diff --git a/hvac/api/system_backend/health.py b/hvac/api/system_backend/health.py index <HASH>..<HASH> 100644 --- a/hvac/api/system_backend/health.py +++ b/hvac/api/system_backend/health.py @@ -63,7 +63,7 @@ class Health(SystemBackendMixin): api_path = utils.format_url('/v1/sys/health') return self._adapter.get( url=api_path, - json=params, + params=params, raise_exception=False, ) else:
fix health.py read_health_status GET method to use query strings instead of post body
py
diff --git a/eulfedora/models.py b/eulfedora/models.py index <HASH>..<HASH> 100644 --- a/eulfedora/models.py +++ b/eulfedora/models.py @@ -1027,8 +1027,10 @@ class DigitalObject(object): self.default_pidspace = default_pidspace except AttributeError: # allow extending classes to make default_pidspace a custom property, - # but warn in case of conflict - logger.warn("Failed to set requested default_pidspace %s" % default_pidspace) + # but warn if there is case of conflict + if default_pidspace != getattr(self, 'default_pidspace', None): + logger.warn("Failed to set requested default_pidspace %s (using %s instead)" \ + % (default_pidspace, self.default_pidspace)) # cache object profile, track if it is modified and needs to be saved self._info = None self.info_modified = False
revise default pidspace warning to include more information, only warn if there is a conflict
py
diff --git a/examples/blueprints.py b/examples/blueprints.py index <HASH>..<HASH> 100644 --- a/examples/blueprints.py +++ b/examples/blueprints.py @@ -18,7 +18,7 @@ async def foo2(request): return json({'msg': 'hi from blueprint2'}) -app.register_blueprint(blueprint) -app.register_blueprint(blueprint2) +app.blueprint(blueprint) +app.blueprint(blueprint2) app.run(host="0.0.0.0", port=8000, debug=True)
use blueprint method instead of deprecated register_blueprint
py
diff --git a/auth0/v3/management/__init__.py b/auth0/v3/management/__init__.py index <HASH>..<HASH> 100644 --- a/auth0/v3/management/__init__.py +++ b/auth0/v3/management/__init__.py @@ -9,6 +9,7 @@ from .email_templates import EmailTemplates from .emails import Emails from .grants import Grants from .guardian import Guardian +from .hooks import Hooks from .jobs import Jobs from .logs import Logs from .resource_servers import ResourceServers
Import hooks in management __init__ so that 'from auth0.v3.management import Hooks' is possible
py
diff --git a/pgpy/packet/types.py b/pgpy/packet/types.py index <HASH>..<HASH> 100644 --- a/pgpy/packet/types.py +++ b/pgpy/packet/types.py @@ -157,9 +157,19 @@ class HashAlgo(PFIntEnum): @property def digestlen(self): - if self == HashAlgo.SHA1: + if self == HashAlgo.MD5: + return 128 + + if self in [HashAlgo.SHA1, + HashAlgo.RIPEMD160]: return 160 + if self in [HashAlgo.SHA256, + HashAlgo.SHA384, + HashAlgo.SHA512, + HashAlgo.SHA224]: + return int(self.name[-3:]) + raise NotImplementedError(self.name) # pragma: no cover @property
completed HashAlgo.digestlen
py
diff --git a/openxc/tools/diagnostics.py b/openxc/tools/diagnostics.py index <HASH>..<HASH> 100644 --- a/openxc/tools/diagnostics.py +++ b/openxc/tools/diagnostics.py @@ -47,7 +47,7 @@ class ResponseHandler(object): if arguments.pid is not None: request['request']['pid'] = int(arguments.pid, 0) if arguments.frequency is not None: - request['frequency'] = int(arguments.frequency, 0) + request['request']['frequency'] = int(arguments.frequency, 0) controller.diagnostic_request(request)
When sending a diag request, put frequency in the request object, not the parent.
py
diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py index <HASH>..<HASH> 100644 --- a/tests/python/unittest/test_optimizer.py +++ b/tests/python/unittest/test_optimizer.py @@ -702,7 +702,7 @@ class PySignum(mx.optimizer.Optimizer): else: weight[:] = (1 - lr*(wd+self.wd_lh))*weight - lr*mx.nd.sign(grad) -@with_seed(0) +@with_seed() def test_signum(): opt1 = PySignum opt2 = mx.optimizer.Signum
removed fixed seed from test_optimizer.test_signum (#<I>)
py
diff --git a/h2o-py/h2o/frame.py b/h2o-py/h2o/frame.py index <HASH>..<HASH> 100644 --- a/h2o-py/h2o/frame.py +++ b/h2o-py/h2o/frame.py @@ -664,7 +664,7 @@ class H2OVec: # whole vec replacement self._len_check(b) # lazy update in-place of the whole vec - self._expr = Expr("=", Expr("[", self._expr, b), Expr(c)) + self._expr = Expr("=", Expr("[", self._expr, b), None if c is None else Expr(c)) else: raise NotImplementedError("Only vector replacement is currently supported.")
handle assign null over 0 ... as well as assign 0 over null
py
diff --git a/holoviews/core/dimension.py b/holoviews/core/dimension.py index <HASH>..<HASH> 100644 --- a/holoviews/core/dimension.py +++ b/holoviews/core/dimension.py @@ -277,7 +277,7 @@ class LabelledData(param.Parameterized): label = param.String(default='', constant=True, doc=""" Optional label describing the data, typically reflecting where or how it was measured. The label should allow a specific - measurement or dataset to be referenced for a given group.""") + measurement or dataset to be referenced for a given group..""") _deep_indexable = False
Temporarily reverted typo fix to get tests passing
py
diff --git a/pmagpy/ipmag.py b/pmagpy/ipmag.py index <HASH>..<HASH> 100755 --- a/pmagpy/ipmag.py +++ b/pmagpy/ipmag.py @@ -9198,6 +9198,7 @@ def aniso_magic(infile='specimens.txt', samp_file='samples.txt', site_file='site vec=vec, num_bootstraps=num_bootstraps, title=site) files = {key: loc + "_" + site +"_" + crd + "_aniso-" + key + ".png" for (key, value) in figs.items()} if pmagplotlib.isServer: + titles = {} for key in figs.keys(): files[key] = "LO:_" + loc + "_SI:_" + site + '_TY:_aniso_' + key + '_.' + fmt titles = {} @@ -9241,6 +9242,7 @@ def aniso_magic(infile='specimens.txt', samp_file='samples.txt', site_file='site locs = "-".join(locs) files = {key: locs + "_" + crd + "_aniso-" + key + ".png" for (key, value) in figs.items()} if pmagplotlib.isServer: + titles = {} for key in figs.keys(): files[key] = 'MC:_' + con_id + '_TY:_aniso_' + key + '_.' + fmt titles = {}
fix aniso_magic error when there are no figures for server, #<I>
py
diff --git a/unittests/test_xml_generators.py b/unittests/test_xml_generators.py index <HASH>..<HASH> 100644 --- a/unittests/test_xml_generators.py +++ b/unittests/test_xml_generators.py @@ -35,6 +35,7 @@ class Test(parser_test_case.parser_test_case_t): self.assertFalse(gen.is_gccxml) self.assertTrue(gen.is_castxml) self.assertTrue(gen.is_castxml1) + self.assertEqual(str(gen.xml_output_version), "1.1.0") def _test_impl( self, gccxml_cvs_revision, is_castxml,
Test if the version is correctly set
py
diff --git a/scuba/__main__.py b/scuba/__main__.py index <HASH>..<HASH> 100644 --- a/scuba/__main__.py +++ b/scuba/__main__.py @@ -246,7 +246,14 @@ class ScubaDive(object): raise ScubaError(str(e)) verbose_msg('{0} Cmd: "{1}"'.format(self.config.image, cmd)) - self.docker_cmd = cmd + # The user command is executed via a generated shell script + with self.open_scubadir_file('command.sh', 'wt') as f: + self.docker_cmd = ['/bin/sh', f.container_path] + writeln(f, '#!/bin/sh') + writeln(f, '# Auto-generated from scuba') + writeln(f, 'set -e') + writeln(f, shell_quote_cmd(cmd)) + def open_scubadir_file(self, name, mode): '''Opens a file in the 'scubadir'
scuba: Run user command via generated shell script. See #<I>
py
diff --git a/lib/svtplay_dl/utils/__init__.py b/lib/svtplay_dl/utils/__init__.py index <HASH>..<HASH> 100644 --- a/lib/svtplay_dl/utils/__init__.py +++ b/lib/svtplay_dl/utils/__init__.py @@ -5,6 +5,7 @@ import sys import logging import re import unicodedata +import platform from operator import itemgetter try: @@ -182,3 +183,26 @@ def download_thumbnail(options, url): fd = open(tbn, "wb") fd.write(data) fd.close() + + +def which(program): + import os + + if platform.system() == "Windows": + program = "{0}.exe".format(program) + + def is_exe(fpath): + return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + + fpath, fname = os.path.split(program) + if fpath: + if is_exe(program): + return program + else: + for path in os.environ["PATH"].split(os.pathsep): + path = path.strip('"') + exe_file = os.path.join(path, program) + if is_exe(exe_file): + return exe_file + + return None \ No newline at end of file
utils: implement a function to find program in path
py
diff --git a/test/test_plyfile.py b/test/test_plyfile.py index <HASH>..<HASH> 100644 --- a/test/test_plyfile.py +++ b/test/test_plyfile.py @@ -286,6 +286,20 @@ def test_write_read_str_filename(tmpdir, tet_ply_txt): verify(ply0, ply1) +def test_memmap(tmpdir, tet_ply_txt): + vertex = tet_ply_txt['vertex'] + face0 = PlyElement.describe(tet_ply_txt['face'].data, 'face0') + face1 = PlyElement.describe(tet_ply_txt['face'].data, 'face1') + + # Since the memory mapping requires some manual offset calculation, + # check that it's done correctly when there are elements before + # and after the one that can be memory-mapped. + ply0 = PlyData([face0, vertex, face1]) + ply1 = write_read(ply0, tmpdir) + + verify(ply0, ply1) + + # In Python 3, `unicode' is not a separate type from `str' (and the # `unicode' builtin does not exist). Thus, this test is unnecessary # (and indeed would not pass).
Add test case with sandwiched "simple" element
py
diff --git a/autolab_core/yaml_config.py b/autolab_core/yaml_config.py index <HASH>..<HASH> 100644 --- a/autolab_core/yaml_config.py +++ b/autolab_core/yaml_config.py @@ -92,7 +92,7 @@ class YamlConfig(object): def recursive_load(matchobj, path): first_spacing = matchobj.group(1) other_spacing = first_spacing.replace('-', ' ') - fname = os.path.join(path, matchobj.group(2)) + fname = os.path.join(path, matchobj.group(2).rstrip()) new_path, _ = os.path.split(fname) new_path = os.path.realpath(new_path) text = ''
remove trailing spaces for recursive loading
py
diff --git a/pymc3/step_methods/step_sizes.py b/pymc3/step_methods/step_sizes.py index <HASH>..<HASH> 100644 --- a/pymc3/step_methods/step_sizes.py +++ b/pymc3/step_methods/step_sizes.py @@ -67,15 +67,15 @@ class DualAverageAdaptation: mean_accept = np.mean(accept) target_accept = self._target # Try to find a reasonable interval for acceptable acceptance - # probabilities. Finding this was mostry trial and error. + # probabilities. Finding this was mostly trial and error. n_bound = min(100, len(accept)) n_good, n_bad = mean_accept * n_bound, (1 - mean_accept) * n_bound lower, upper = stats.beta(n_good + 1, n_bad + 1).interval(0.95) if target_accept < lower or target_accept > upper: msg = ( - "The acceptance probability does not match the target. It " - "is %s, but should be close to %s. Try to increase the " - "number of tuning steps." % (mean_accept, target_accept) + f"The acceptance probability does not match the target. " + f"It is {mean_accept:0.4g}, but should be close to {target_accept:0.4g}. " + f"Try to increase the number of tuning steps." ) info = {"target": target_accept, "actual": mean_accept} warning = SamplerWarning(WarningType.BAD_ACCEPTANCE, msg, "warn", extra=info)
reduce sigfigs for acceptance probability warning
py
diff --git a/src/feat/database/tools.py b/src/feat/database/tools.py index <HASH>..<HASH> 100644 --- a/src/feat/database/tools.py +++ b/src/feat/database/tools.py @@ -223,7 +223,7 @@ def migration_script(connection): include_docs=True) migrated += len(fetched) - if not migrated: + if not fetched: break log.info("script", "Migrated %d documents of the type %s " "from %s version to %s", migrated, type_name,
Fix typo that made the migration script get stuck in a loop.
py
diff --git a/django_inlines/inlines.py b/django_inlines/inlines.py index <HASH>..<HASH> 100644 --- a/django_inlines/inlines.py +++ b/django_inlines/inlines.py @@ -61,7 +61,7 @@ def parse_inline(text): if kwtxt: for kws in kwtxt.split(): k, v = kws.split('=') - kwargs[k] = v + kwargs[str(k)] = v return (name, value, kwargs)
Keys must be strings, not unicode, to use them with **kwargs later.
py
diff --git a/pychromecast/controllers/plex.py b/pychromecast/controllers/plex.py index <HASH>..<HASH> 100644 --- a/pychromecast/controllers/plex.py +++ b/pychromecast/controllers/plex.py @@ -59,7 +59,7 @@ def media_to_chromecast_command( **kwargs ): # noqa: 501 pylint: disable=invalid-name, too-many-arguments, too-many-locals, protected-access, redefined-builtin """Create the message that chromecast requires. Use pass of plexapi media object or - set all the neeeded kwargs manually. See the code for what to set. + set all the needed kwargs manually. See the code for what to set. Args: media (None, optional): a :class:`~plexapi.base.Playable
docs: fix simple typo, neeeded -> needed (#<I>). There is a small typo in pychromecast/controllers/plex.py. Should read `needed` rather than `neeeded`.
py
diff --git a/multiqc/modules/skewer/skewer.py b/multiqc/modules/skewer/skewer.py index <HASH>..<HASH> 100644 --- a/multiqc/modules/skewer/skewer.py +++ b/multiqc/modules/skewer/skewer.py @@ -92,8 +92,8 @@ class MultiqcModule(BaseMultiqcModule): """ Go through log file looking for skewer output """ fh = f['f'] regexes = { - 'fq1': "Input file:\s+(\S+)\.(?:fastq|fq)(?:\.gz)?", - 'fq2': "Paired file:\s+(\S+)\.(?:fastq|fq)(?:\.gz)?", + 'fq1': "Input file:\s+(.+)", + 'fq2': "Paired file:\s+(.+)", 'r_processed': "(\d+) read|reads pairs? processed", 'r_short_filtered': "(\d+) \(\s*\d+.\d+%\) short read", 'r_empty_filtered': "(\d+) \(\s*\d+.\d+%\) empty read",
Made regex more relaxed to capture any input filename. See #<I>.
py
diff --git a/tests/test_api.py b/tests/test_api.py index <HASH>..<HASH> 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -172,7 +172,7 @@ class TestAPI(object): class Bar(Resource): def get(self): return 'bar' - major, minor = map(int, flask_version.split("."))[:2] + major, minor = tuple(map(int, flask_version.split(".")))[:2] if major == 0 and minor > 10: with pytest.raises(AssertionError) as err: api.add_resource(Bar, '/bar', endpoint='baz')
fix test. A map in Python 3 doesn't return a list like it does in Python 2.
py
diff --git a/ags_publishing_tools/MapServicePublisher.py b/ags_publishing_tools/MapServicePublisher.py index <HASH>..<HASH> 100644 --- a/ags_publishing_tools/MapServicePublisher.py +++ b/ags_publishing_tools/MapServicePublisher.py @@ -138,11 +138,13 @@ class MapServicePublisher: mxd = arcpy.mapping.MapDocument(self.config_parser.get_full_path(path_to_mxd)) for workspace in workspaces: - self.message("Replacing workspace " + workspace["old"]["path"] + " => " + workspace["new"]["path"]) + old_path = self.config_parser.get_full_path(workspace["old"]["path"]) + new_path = self.config_parser.get_full_path(workspace["new"]["path"]) + self.message("Replacing workspace " + old_path + " => " + new_path) mxd.replaceWorkspaces( - old_workspace_path=workspace["old"]["path"], + old_workspace_path=old_path, old_workspace_type=workspace["old"]["type"] if "type" in workspace["old"] else "SDE_WORKSPACE", - new_workspace_path=workspace["new"]["path"], + new_workspace_path=new_path, new_workspace_type=workspace["new"]["type"] if "type" in workspace["new"] else "SDE_WORKSPACE", validate=False )
Uses full paths for workspace replacement. GES-<I>
py
diff --git a/colin/core/target.py b/colin/core/target.py index <HASH>..<HASH> 100644 --- a/colin/core/target.py +++ b/colin/core/target.py @@ -15,6 +15,7 @@ # import enum +import io import logging import os @@ -57,17 +58,22 @@ class Target(object): """ Get the Container/Image instance for the given name. (Container is the first choice.) - or DockerfileParser instance if the target is path. + or DockerfileParser instance if the target is path or file-like object. - :param target: str or instance of Image/Container - :return: Container/Image + :param target: str + or instance of Image/Container + or file-like object as Dockerfile + :return: Target object """ logger.debug("Finding target '{}'.".format(target)) if isinstance(target, (Image, Container)): logger.debug("Target is a conu object.") return target - if os.path.exists(target): + if isinstance(target, io.IOBase): + logger.debug("Target is a dockerfile loaded from the file-like object.") + return DockerfileParser(fileobj=target) + if os.path.isfile(target): logger.debug("Target is a dockerfile.") return DockerfileParser(fileobj=open(target))
Add support for the dockerfile as file-like object
py
diff --git a/pyinfra/operations/python.py b/pyinfra/operations/python.py index <HASH>..<HASH> 100644 --- a/pyinfra/operations/python.py +++ b/pyinfra/operations/python.py @@ -23,11 +23,14 @@ def call(function, *args, **kwargs): command = 'echo hello' if hello: command = command + ' ' + hello - status, stdout, stderr = host.run_shell_command(state, command=command, sudo=SUDO) + + status, stdout, stderr = host.run_shell_command(command=command, sudo=SUDO) assert status is True # ensure the command executed OK - if 'hello ' not in str(stdout): - raise Exception('`{}` problem with callback stdout:{} stderr:{}'.format( - command, stdout, stderr)) + + if 'hello ' not in '\\n'.join(stdout): # stdout/stderr is a *list* of lines + raise Exception( + f'`{command}` problem with callback stdout:{stdout} stderr:{stderr}', + ) python.call( name='Run my_callback function',
Fix the example in `python.call` operation doc.
py
diff --git a/skorch/callbacks/__init__.py b/skorch/callbacks/__init__.py index <HASH>..<HASH> 100644 --- a/skorch/callbacks/__init__.py +++ b/skorch/callbacks/__init__.py @@ -13,8 +13,25 @@ from .scoring import * from .training import * from .lr_scheduler import * -__all__ = ['Callback', 'EpochTimer', 'NeptuneLogger', 'PrintLog', 'ProgressBar', - 'LRScheduler', 'WarmRestartLR', 'GradientNormClipping', - 'BatchScoring', 'EpochScoring', 'Checkpoint', 'EarlyStopping', - 'Freezer', 'Unfreezer', 'Initializer', 'ParamMapper', - 'LoadInitState', 'TrainEndCheckpoint'] + +__all__ = [ + 'BatchScoring', + 'Callback', + 'Checkpoint', + 'EarlyStopping', + 'EpochScoring', + 'EpochTimer', + 'Freezer', + 'GradientNormClipping', + 'Initializer', + 'LRScheduler', + 'LoadInitState', + 'NeptuneLogger', + 'ParamMapper', + 'PrintLog', + 'ProgressBar', + 'TrainEndCheckpoint', + 'TensorBoard', + 'Unfreezer', + 'WarmRestartLR', +]
Add tensorboard to __all__ list
py
diff --git a/phypno/ioeeg/fieldtrip.py b/phypno/ioeeg/fieldtrip.py index <HASH>..<HASH> 100644 --- a/phypno/ioeeg/fieldtrip.py +++ b/phypno/ioeeg/fieldtrip.py @@ -10,7 +10,7 @@ except ImportError: lg.warning('scipy (optional dependency) is not installed. You will not ' 'be able to read and write in FieldTrip format.') -VAR = 'rawdata' +VAR = 'data' class FieldTrip:
fieldtrip wants data, not rawdata
py
diff --git a/demo/fitz2jpg.py b/demo/fitz2jpg.py index <HASH>..<HASH> 100644 --- a/demo/fitz2jpg.py +++ b/demo/fitz2jpg.py @@ -5,17 +5,9 @@ from PIL import Image import sys from __future__ import print_function ''' -demonstrates how to output a JPEG image from PyMuPDF using PIL / Pillow +Given any pixmap, use Pil / Pillow to save it in a different format +Example: JPEG ''' -if len(sys.argv) == 2: - pic_fn = sys.argv[1] -else: - pic_fn = None - -if pic_fn: - print(pic_fn) - pic = open(pic_fn, "rb").read() - - pix = fitz.Pixmap(pic, len(pic)) - img = Image.frombytes("RGBA",[pix.width, pix.height], str(pix.samples)) - img.save(pic_fn + ".jpg", "jpeg") \ No newline at end of file +pix = fitz.Pixmap(...) +img = Image.frombytes("RGBA",[pix.width, pix.height], str(pix.samples)) +img.save("filename.jpg", "jpeg")
Turned into a generalized Code Snippet
py
diff --git a/mapillary_tools/process_video.py b/mapillary_tools/process_video.py index <HASH>..<HASH> 100644 --- a/mapillary_tools/process_video.py +++ b/mapillary_tools/process_video.py @@ -175,6 +175,9 @@ def insert_video_frame_timestamp(video_filename, video_sampling_path, start_time def get_video_start_time(video_file): """Get video start time in seconds""" + if not os.path.isfile(video_file): + print("Error, video file {} does not exist".format(video_file)) + return None try: time_string = FFProbe(video_file).video[0].creation_time try:
add: write warning when getting start time if video file doesn't exist
py
diff --git a/arcana/__about__.py b/arcana/__about__.py index <HASH>..<HASH> 100644 --- a/arcana/__about__.py +++ b/arcana/__about__.py @@ -1,5 +1,5 @@ -__version__ = '0.3.1' +__version__ = '0.4' __authors__ = [ ("Thomas G. Close", "[email protected]"),
Upped version number to <I>
py
diff --git a/synapse/tests/test_telepath.py b/synapse/tests/test_telepath.py index <HASH>..<HASH> 100644 --- a/synapse/tests/test_telepath.py +++ b/synapse/tests/test_telepath.py @@ -122,6 +122,7 @@ class TeleTest(s_test.SynTest): def test_telepath_basics(self): foo = Foo() + evt = threading.Event() with self.getTestDmon() as dmon: @@ -132,6 +133,8 @@ class TeleTest(s_test.SynTest): # called via synchelp... prox = s_telepath.openurl('tcp://127.0.0.1/foo', port=addr[1]) + # Add an additional prox.fini handler. + prox.onfini(evt.set) self.false(prox.iAmLoop()) @@ -157,6 +160,10 @@ class TeleTest(s_test.SynTest): self.raises(s_exc.SynErr, prox.boom) + # Fini'ing a daemon fini's proxies connected to it. + self.true(evt.wait(2)) + self.true(prox.isifini) + @s_glob.synchelp async def test_telepath_async(self): foo = Foo()
Add a test showing that a proxy is fini'd when a daemon is shut down.
py
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index <HASH>..<HASH> 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -872,6 +872,17 @@ class Window(_Window): 3 NaN 4 NaN + Rolling sum with a window length of 2, using the 'gaussian' + window type (note how we need to specify std). + + >>> df.rolling(2, win_type='gaussian').sum(std=3) + B + 0 NaN + 1 0.986207 + 2 2.958621 + 3 NaN + 4 NaN + Rolling sum with a window length of 2, min_periods defaults to the window length.
:pencil: add example of rolling with win_type gaussian (#<I>)
py
diff --git a/instabot/bot/bot_stats.py b/instabot/bot/bot_stats.py index <HASH>..<HASH> 100644 --- a/instabot/bot/bot_stats.py +++ b/instabot/bot/bot_stats.py @@ -38,7 +38,7 @@ def save_user_stats(self, username, path=""): infodict = self.get_user_info(user_id) if infodict: data_to_save = { - "date": str(datetime.datetime.now()), + "date": str(datetime.datetime.now().replace(microsecond=0)), "followers": int(infodict["follower_count"]), "following": int(infodict["following_count"]), "medias": int(infodict["media_count"])
Removed microseconds from the date in save_stats.
py
diff --git a/ckanext/oauth2/oauth2.py b/ckanext/oauth2/oauth2.py index <HASH>..<HASH> 100644 --- a/ckanext/oauth2/oauth2.py +++ b/ckanext/oauth2/oauth2.py @@ -102,10 +102,14 @@ class OAuth2Helper(object): headers = { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', - 'Authorization': 'Basic %s' % base64.urlsafe_b64encode( + } + + if self.legacy_idm: + # This is only required for Keyrock v6 and v5 + headers['Authorization'] = 'Basic %s' % base64.urlsafe_b64encode( '%s:%s' % (self.client_id, self.client_secret) ) - } + try: token = oauth.fetch_token(self.token_endpoint, headers=headers,
Using the Authorization header on the get_token method is only required when using a legacy version of KeyRock
py
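A sketch of the conditional header logic, written for Python 3 (the patched module is Python 2, so it passes `str` to `b64encode` directly); the credential values are illustrative:

```python
import base64

def build_token_headers(client_id, client_secret, legacy_idm):
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    if legacy_idm:
        # Only Keyrock v5/v6 expect HTTP Basic credentials on this request.
        credentials = '%s:%s' % (client_id, client_secret)
        headers['Authorization'] = 'Basic %s' % base64.urlsafe_b64encode(
            credentials.encode()).decode()
    return headers

print(build_token_headers('my-client', 'my-secret', legacy_idm=True))
print(build_token_headers('my-client', 'my-secret', legacy_idm=False))
```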
diff --git a/phypno/viz/plot_1d.py b/phypno/viz/plot_1d.py index <HASH>..<HASH> 100644 --- a/phypno/viz/plot_1d.py +++ b/phypno/viz/plot_1d.py @@ -1,23 +1,16 @@ -import pyqtgraph as pg - - -win = pg.GraphicsWindow(title="Basic plotting examples") -win.resize(1000,600) -win.setWindowTitle('pyqtgraph example: Plotting') - -# Enable antialiasing for prettier plots -pg.setConfigOptions(antialias=True) - -p2 = win.addPlot(title="Multiple curves") +from pyqtgraph import GraphicsWindow def plot_data(data, xaxis='time', xlog=False, ylog=False): """Plot data in 2d. """ - - + win = GraphicsWindow(title="plot data") xval = getattr(data, xaxis) + for i_ch in range(len(data.chan_name)): - p2.plot(xval, data.data[i_ch, :]) + p = win.addPlot(title=data.chan_name[i_ch]) + p.plot(xval, data.data[i_ch, :]) + win.nextRow() + return win # avoid garbage-collection
pyqtgraph works well in ipython with gui qt
py
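A condensed version of the rewritten function, runnable in a session that owns a Qt event loop (e.g. `ipython --gui=qt`); the sine/cosine channels are made up for the demo:

```python
import numpy as np
from pyqtgraph import GraphicsWindow

def plot_data(chan_names, data, xval):
    win = GraphicsWindow(title="plot data")
    for i_ch, name in enumerate(chan_names):
        p = win.addPlot(title=name)
        p.plot(xval, data[i_ch, :])
        win.nextRow()
    return win  # the caller must keep this reference, or Qt tears it down

xval = np.linspace(0, 1, 100)
data = np.vstack([np.sin(2 * np.pi * xval), np.cos(2 * np.pi * xval)])
win = plot_data(['sin', 'cos'], data, xval)
```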
diff --git a/setup.py b/setup.py index <HASH>..<HASH> 100644 --- a/setup.py +++ b/setup.py @@ -22,6 +22,7 @@ install_requires = [ 'croniter==0.3.4', 'gevent-socketio>=0.3.6,<0.4', 'virtualenv>=1.11.6,<1.12', + 'requests', ] dev_requires = [ @@ -67,4 +68,4 @@ setup( 'Framework :: Django', 'Programming Language :: Python' ], -) \ No newline at end of file +)
Added requests module to setup.py. Added requests module to install_requires in setup.py.
py
diff --git a/flask_permissions/models.py b/flask_permissions/models.py index <HASH>..<HASH> 100644 --- a/flask_permissions/models.py +++ b/flask_permissions/models.py @@ -110,7 +110,18 @@ class UserMixin(db.Model): @hybrid_property def _id_column_name(self): - for k, v in self.__dict__.items(): + + # the list of the class's columns (with attributes like + # 'primary_key', etc.) is accessible in different places + # before and after table definition. + if self.__tablename__ in self.metadata.tables.keys(): + # after definition, it's here + columns = self.metadata.tables[self.__tablename__]._columns + else: + # before, it's here + columns = self.__dict__ + + for k, v in columns.items(): if getattr(v, 'primary_key', False): return k @@ -154,3 +165,9 @@ class UserMixin(db.Model): def remove_roles(self, *roles): self.roles = [role for role in self.roles if role not in roles] + + def get_id(self): + return unicode(getattr(self, self._id_column_name)) + + def __repr__(self): + return '<{} {}>'.format(self.__tablename__.capitalize(), self.get_id()) \ No newline at end of file
Add back two reasonable methods to UserMixin subclass ... after figuring out how to refer to the model's id before it is dynamically set
py
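The before/after distinction can be reproduced with plain SQLAlchemy: once the model's table exists, its columns — with their `primary_key` flags — are reachable through the shared `MetaData`, keyed by table name. (The patch reads the private `_columns` attribute; the public `columns` collection used below behaves the same way.)

```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    uid = Column(Integer, primary_key=True)
    name = Column(String)

# After table definition, the column objects live on the MetaData.
columns = Base.metadata.tables['user'].columns
pk_name = next(k for k, v in columns.items()
               if getattr(v, 'primary_key', False))
print(pk_name)  # -> 'uid'
```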
diff --git a/adafruit_platformdetect/chip.py b/adafruit_platformdetect/chip.py index <HASH>..<HASH> 100644 --- a/adafruit_platformdetect/chip.py +++ b/adafruit_platformdetect/chip.py @@ -104,9 +104,9 @@ class Chip: if compatible and 'amlogic, g12b' in compatible: linux_id = S922X - machine = self.detector.get_cpuinfo_field("machine") + cpu_model = self.detector.get_cpuinfo_field("cpu model") - if machine in ("Onion Omega", "Onion Omega2"): + if ("MIPS 24Kc") in cpu_model: linux_id = ONION elif hardware in ("BCM2708", "BCM2709", "BCM2835"):
Change detection of Onion Omega boards to be based on their 'cpu model'
py
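An illustrative version of the new check — Omega boards report a MIPS 24Kc core in the 'cpu model' field of /proc/cpuinfo. The None guard below is my addition, not part of the patch:

```python
def is_onion(cpu_model):
    return cpu_model is not None and "MIPS 24Kc" in cpu_model

print(is_onion("MIPS 24Kc V5.5"))               # True on an Omega/Omega2
print(is_onion("ARMv7 Processor rev 4 (v7l)"))  # False
```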
diff --git a/astrobase/checkplot.py b/astrobase/checkplot.py index <HASH>..<HASH> 100644 --- a/astrobase/checkplot.py +++ b/astrobase/checkplot.py @@ -1409,7 +1409,7 @@ def _pkl_finder_objectinfo(objectinfo, dust_timeout = 10.0 gaia_submit_timeout = 7.0 gaia_max_timeout = 10.0 - gaia_submit_tries = 1 + gaia_submit_tries = 2 complete_query_later = False search_simbad = False @@ -1419,7 +1419,7 @@ def _pkl_finder_objectinfo(objectinfo, dust_timeout = fast_mode gaia_submit_timeout = 0.66*fast_mode gaia_max_timeout = fast_mode - gaia_submit_tries = 1 + gaia_submit_tries = 2 complete_query_later = False search_simbad = False
checkplot: set fast_mode gaia_submit_tries = 2
py
diff --git a/pypd/models/entity.py b/pypd/models/entity.py index <HASH>..<HASH> 100644 --- a/pypd/models/entity.py +++ b/pypd/models/entity.py @@ -260,7 +260,7 @@ class Entity(ClientMixin): endpoint = cls.get_endpoint() inst = cls(api_key=api_key) - parse_key = cls.sanitize_ep(endpoint) + parse_key = cls.sanitize_ep(endpoint).split("/")[-1] endpoint = '/'.join((endpoint, id)) data = cls._parse(inst.request('GET', endpoint=endpoint,
Use only the last path component when constructing the data key. Resources like integrations are nested beneath a service. The key should read 'integration', not 'services/ID/integration'.
py
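The fix in isolation — whatever normalization `sanitize_ep` applies is elided here, only the new `split` step is shown, and the endpoint strings are illustrative:

```python
def parse_key_from(endpoint):
    # Keep only the last path component as the data key.
    return endpoint.split("/")[-1]

print(parse_key_from("services/SVCID/integrations"))  # -> 'integrations'
print(parse_key_from("incidents"))                    # -> 'incidents'
```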
diff --git a/src/com/dtmilano/android/plot.py b/src/com/dtmilano/android/plot.py index <HASH>..<HASH> 100644 --- a/src/com/dtmilano/android/plot.py +++ b/src/com/dtmilano/android/plot.py @@ -29,7 +29,7 @@ from mpl_toolkits.axes_grid1 import host_subplot from com.dtmilano.android.adb.dumpsys import Dumpsys -__version__ = '13.2.1' +__version__ = '13.2.2' DEBUG = True @@ -163,13 +163,13 @@ class Plot: for v in range(int(ceil(ymax)) + 1): x.append(1 / 60.0 * 10 ** 9) y.append(v) - plt.plot(x, y, linewidth=2, color='r') + plt.plot(x, y, linewidth=2, color='c') x = [] y = [] for v in range(int(ceil(ymax)) + 1): x.append(1 / 30.0 * 10 ** 9) y.append(v) - plt.plot(x, y, linewidth=2, color='c') + plt.plot(x, y, linewidth=2, color='r') plt.xlabel('ms') plt.ylabel('Frames')
Exchanged colors in framestats plot
py
diff --git a/www/speed/benchmarks/function_call_complex.py b/www/speed/benchmarks/function_call_complex.py index <HASH>..<HASH> 100644 --- a/www/speed/benchmarks/function_call_complex.py +++ b/www/speed/benchmarks/function_call_complex.py @@ -1,5 +1,5 @@ def f(x, y=0, *args, **kw): return x -for i in range(1000000): +for i in range(100000): f(i, 5, 6, a=8)
Fewer loops in speed/benchmarks/function_call_complex.py
py
diff --git a/pydevd_breakpoints.py b/pydevd_breakpoints.py index <HASH>..<HASH> 100644 --- a/pydevd_breakpoints.py +++ b/pydevd_breakpoints.py @@ -95,18 +95,25 @@ def _excepthook(exctype, value, tb): return frames = [] + debugger = GetGlobalDebugger() + user_frames = [] while tb: + frame = tb.tb_frame + if exception_breakpoint.ignore_libraries and not debugger.not_in_scope(frame.f_code.co_filename): + user_frames.append(tb.tb_frame) frames.append(tb.tb_frame) tb = tb.tb_next thread = threadingCurrentThread() frames_byid = dict([(id(frame),frame) for frame in frames]) - frame = frames[-1] + if exception_breakpoint.ignore_libraries: + frame = user_frames[-1] + else: + frame = frames[-1] thread.additionalInfo.exception = (exctype, value, tb) thread.additionalInfo.pydev_force_stop_at_exception = (frame, frames_byid) thread.additionalInfo.message = exception_breakpoint.qname - debugger = GetGlobalDebugger() pydevd_tracing.SetTrace(None) #no tracing from here
PY-<I> Debugger: 'Ignore library files' stops at exception breakpoints inside libraries if the 'On termination' policy is active (cherry picked from commit ff1dcc8b<I>fd8c3f<I>d<I>fb9d<I>cf9e<I>dfc<I>)
py
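The frame-walking logic reduces to: collect all traceback frames, keep a parallel list of frames that live in user code, and stop on the last user frame when the ignore-libraries option is set. A library-free sketch, with file-name strings standing in for frame objects (the empty-list guard is my addition):

```python
def pick_stop_frame(frames, in_user_code, ignore_libraries):
    user_frames = [f for f in frames if in_user_code(f)]
    if ignore_libraries and user_frames:
        return user_frames[-1]   # last frame in the user's own code
    return frames[-1]            # otherwise the innermost frame

frames = ['app.py', 'helpers.py', 'site-packages/requests/api.py']
stop = pick_stop_frame(frames,
                       in_user_code=lambda f: 'site-packages' not in f,
                       ignore_libraries=True)
print(stop)  # -> 'helpers.py', not the library frame
```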
diff --git a/hwt/serializer/verilog/statements.py b/hwt/serializer/verilog/statements.py index <HASH>..<HASH> 100644 --- a/hwt/serializer/verilog/statements.py +++ b/hwt/serializer/verilog/statements.py @@ -33,11 +33,7 @@ class ToHdlAstVerilog_statements(): return a def can_pop_process_wrap(self, stms, hasToBeVhdlProcess): - if hasToBeVhdlProcess: - return False - else: - assert len(stms) == 1 - return True + return False # because block contains label with process name def has_to_be_process(self, proc: HdlStatementBlock): for o in proc._outputs: @@ -62,4 +58,12 @@ class ToHdlAstVerilog_statements(): # all input are constant and that is why this process does not have # any sensitivity p.sensitivity = [HdlAll, ] + + # add label + if not isinstance(p.body, HdlStmBlock): + b = p.body + p.body = HdlStmBlock() + p.body.body.append(b) + p.body.labels.extend(p.labels) + p.labels.clear() return p
ToHdlAstVerilog_statements: add process label to the body block statement.
py
diff --git a/loguru/_logger.py b/loguru/_logger.py index <HASH>..<HASH> 100644 --- a/loguru/_logger.py +++ b/loguru/_logger.py @@ -1255,6 +1255,11 @@ class Logger: ``Level`` A namedtuple containing information about the level. + Raises + ------ + ValueError + If there is no level registered with such ``name``. + Examples -------- >>> level = logger.level("ERROR")
Document ValueError exception raised by ".level()"
py
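Both the documented return value and the newly documented exception are easy to demonstrate (requires loguru installed):

```python
from loguru import logger

print(logger.level("ERROR"))  # Level namedtuple for a registered level

try:
    logger.level("NO_SUCH_LEVEL")
except ValueError as err:  # the case the docstring now mentions
    print(err)
```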
diff --git a/openquake/commonlib/source.py b/openquake/commonlib/source.py index <HASH>..<HASH> 100644 --- a/openquake/commonlib/source.py +++ b/openquake/commonlib/source.py @@ -23,7 +23,6 @@ from xml.etree import ElementTree as etree import numpy from openquake.baselib.general import AccumDict, groupby, block_splitter -from openquake.hazardlib.const import TRT from openquake.commonlib.node import read_nodes from openquake.commonlib import logictree, sourceconverter, parallel, valid from openquake.commonlib.nrml import nodefactory, PARSE_NS_MAP @@ -515,7 +514,7 @@ def get_trts(smodel): :param smodel: a :class:`openquake.commonlib.source.SourceModel` tuple :returns: a comma separated string of uppercase tectonic region types """ - return ','.join(TRT[capitalize(tmodel.trt)] + return ','.join(capitalize(tmodel.trt) for tmodel in smodel.trt_models)
Removed dependency on const.TRT
py
diff --git a/manage/sawtooth_manage/subproc.py b/manage/sawtooth_manage/subproc.py index <HASH>..<HASH> 100644 --- a/manage/sawtooth_manage/subproc.py +++ b/manage/sawtooth_manage/subproc.py @@ -19,6 +19,7 @@ import subprocess import time import yaml import re +import sys from sawtooth_manage.node import NodeController
Missing library in subprocess manage type. The sys library was needed but missing; added import of sys.
py
diff --git a/pymola/parser.py b/pymola/parser.py index <HASH>..<HASH> 100644 --- a/pymola/parser.py +++ b/pymola/parser.py @@ -597,13 +597,17 @@ class ASTListener(ModelicaListener): sym.class_modification = mod else: # Assignment of value, which we turn into a modification here. - sym_mod = ast.ClassModification() vmod_arg = ast.ClassModificationArgument() vmod_arg.value = ast.ElementModification() vmod_arg.value.component = ast.ComponentRef(name="value") vmod_arg.value.modifications = [mod] - sym_mod.arguments.append(vmod_arg) - sym.class_modification = sym_mod + + if sym.class_modification is None: + sym_mod = ast.ClassModification() + sym_mod.arguments.append(vmod_arg) + sym.class_modification = sym_mod + else: + sym.class_modification.arguments.append(vmod_arg) def exitElement_modification(self, ctx): component = self.ast[ctx.component_reference()]
Fix parsing combination of mods and direct value. This bug was introduced in commit d<I>, where the (wrong) assumption was made that combinations of symbol modifications and value assignments were not possible. In other words, the assumption was made that something like the following could not occur: Real x(nominal=<I>) = <I>;
py
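A stripped-down model of the fix: a value assignment must append to an existing modification list instead of replacing it, so a declaration combining a modification (like `nominal=...`) with a direct value keeps both pieces. The class and values below are toy stand-ins for the AST nodes:

```python
class ClassModification:
    """Toy stand-in for the AST node: just a list of arguments."""
    def __init__(self):
        self.arguments = []

def add_modification(sym_class_mod, mod_arg):
    if sym_class_mod is None:            # no earlier modifications yet
        sym_class_mod = ClassModification()
    sym_class_mod.arguments.append(mod_arg)  # never overwrite what is there
    return sym_class_mod

mod = None
mod = add_modification(mod, "nominal = 10")  # e.g. from 'x(nominal=10)'
mod = add_modification(mod, "value = 3000")  # e.g. from '= 3000'
print(mod.arguments)  # both survive: ['nominal = 10', 'value = 3000']
```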
diff --git a/tensorboard/compat/proto/proto_test.py b/tensorboard/compat/proto/proto_test.py index <HASH>..<HASH> 100644 --- a/tensorboard/compat/proto/proto_test.py +++ b/tensorboard/compat/proto/proto_test.py @@ -174,7 +174,27 @@ The proper fix is: ./tensorboard/compat/proto/update.sh PATH_TO_TENSORFLOW_REPO -3. Review and commit any changes. +3. Verify the updates build. In your tensorboard repo, run: + + bazel build tensorboard/compat/proto/... + + If they fail with an error message like the following: + + '//tensorboard/compat/proto:full_type_genproto' does not exist + + Then create the file in the tensorboard repo: + + touch tensorboard/compat/proto/full_type.proto + + And return to step 2. `update.sh` will only copy files that already exist in + the tensorboard repo. + +4. Update the rust data server proto binaries. In your tensorboard repo, run: + + bazel run //tensorboard/data/server:update_protos + +5. Review and commit any changes. + """
Improve error message for proto_test. (#<I>) * Add step for generating protos for RustBoard. * Add instructions on how to handle new files that must be copied over to TB for the BUILD to succeed.
py
diff --git a/textx/metamodel.py b/textx/metamodel.py index <HASH>..<HASH> 100644 --- a/textx/metamodel.py +++ b/textx/metamodel.py @@ -51,8 +51,8 @@ class TextXMetaModel(dict): Attributes: rootcls(TextXClass): A language class that is a root of the metamodel. - file_name(str): A file name if meta-model was constructed from file - or None otherwise. + file_name(str): A absolute file name if meta-model was constructed + from file or None otherwise. root_dir(str): Absolute directory used as a root for relative grammar imports. If not given file_name dir is used if given. namespace(str): The namespace of this metamodel calculated from @@ -270,7 +270,7 @@ def metamodel_from_str(lang_desc, classes=None, builtins=None, file_name=None, metamodel = TextXMetaModel(file_name=file_name, root_dir=root_dir, classes=classes, builtins=builtins) - # Base types hierarchy + # Base types hierarchy should exist in each meta-model ID = metamodel.new_class('ID', 0) STRING = metamodel.new_class('STRING', 0) BOOL = metamodel.new_class('BOOL', 0)
Small update in docstring and comment
py