\n"}}},{"rowIdx":1103,"cells":{"message":{"kind":"string","value":"fix wrong link\nthe previous link can not open, the correct link maybe"},"diff":{"kind":"string","value":"@@ -19,5 +19,5 @@ Resources\n* `Blog `_\n* `Twitter `_\n* `Code Review `_\n-* `Git Web `_\n+* `Git Web `_\n* `IRC `_ -- **#openstack-browbeat** (irc.freenode.net)\n"}}},{"rowIdx":1104,"cells":{"message":{"kind":"string","value":"Add more details to the internal error for \"worker cannot find registered function\"\nThis adds some more debug information for this internal error that shouldn't happen."},"diff":{"kind":"string","value":"import dis\nimport hashlib\n+import os\nimport importlib\nimport inspect\nimport json\n@@ -405,7 +406,10 @@ class FunctionActorManager:\nwarning_message = (\n\"This worker was asked to execute a \"\n\"function that it does not have \"\n- \"registered. You may have to restart \"\n+ f\"registered ({function_descriptor}, \"\n+ f\"node={self._worker.node_ip_address}, \"\n+ f\"worker_id={self._worker.worker_id.hex()}, \"\n+ f\"pid={os.getpid()}). You may have to restart \"\n\"Ray.\"\n)\nif not warning_sent:\n"}}},{"rowIdx":1105,"cells":{"message":{"kind":"string","value":"Allow user-defined kwargs passed to click.group\nFixes"},"diff":{"kind":"string","value":"@@ -57,6 +57,8 @@ def group(\nshort_help: str = None,\noptions_metavar: str = '[OPTIONS]',\nadd_help_option: bool = True,\n+ # User-defined\n+ **kwargs: Any,\n) -> _Decorator:\n...\n"}}},{"rowIdx":1106,"cells":{"message":{"kind":"string","value":"Orders trustees by id\nOrders trustees by id to garantee access order on \"freeze\" method"},"diff":{"kind":"string","value":"@@ -1171,7 +1171,7 @@ class Trustee(HeliosModel):\n@classmethod\ndef get_by_election(cls, election):\n- return cls.objects.filter(election = election)\n+ return cls.objects.filter(election = election).order_by('id')\n@classmethod\ndef get_by_uuid(cls, uuid):\n"}}},{"rowIdx":1107,"cells":{"message":{"kind":"string","value":"Fix typo\nFix \"contorls\" to \"controls\" in window_text docstring"},"diff":{"kind":"string","value":"@@ -307,7 +307,7 @@ class BaseWrapper(object):\n\"\"\"\nWindow text of the element\n- Quite a few contorls have other text that is visible, for example\n+ Quite a few controls have other text that is visible, for example\nEdit controls usually have an empty string for window_text but still\nhave text displayed in the edit window.\n\"\"\"\n"}}},{"rowIdx":1108,"cells":{"message":{"kind":"string","value":"Minor relocate of badge\nno more info needed"},"diff":{"kind":"string","value":"\nCool Instagram scripts for promotion and API wrapper. Written in Python.\n___\n+[](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg)\n+\nAs you may know, Instagram closed its API in summer 2016. This Python module can do the same things without any effort. 
Also it has lots of [example scripts](https://github.com/ohld/instabot/tree/master/examples) to start with.\nIf you have any ideas, please, leave them in [Issues section](https://github.com/ohld/instabot/issues) or in our [Telegram chat](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg).\n-[](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg)\n-\n-*Your __contribution__ and support through __Stars__ will be highly appreciated.*\n+*Your __contribution__ and support through __stars__ will be highly appreciated.*\n## How to install and update\n"}}},{"rowIdx":1109,"cells":{"message":{"kind":"string","value":"{Compute} Doc fix for 'vm user delete'\nClarify that 'vm user delete' also removes the home directory on Linux systems."},"diff":{"kind":"string","value":"@@ -1950,6 +1950,8 @@ short-summary: Manage user accounts for a VM.\nhelps['vm user delete'] = \"\"\"\ntype: command\nshort-summary: Delete a user account from a VM.\n+long-summary: >\n+ Also deletes the user home directory on Linux VMs.\nexamples:\n- name: Delete a user account.\ntext: az vm user delete -u username -n MyVm -g MyResourceGroup\n"}}},{"rowIdx":1110,"cells":{"message":{"kind":"string","value":"Adds the job user and team\nThis is required as some challenge admins want to see the teams for\neach job in order to download the supplementary file."},"diff":{"kind":"string","value":"{% extends \"site.html\" %}\n{% load evaluation_extras %}\n+{% load user_profile_link from profiles %}\n{% load guardian_tags %}\n{% load url from grandchallenge_tags %}\n\n
    <tr>
        <th>ID</th>
+       {% if "change_challenge" in challenge_perms %}
+           <th>User</th>
+       {% endif %}
        <th>Created</th>
        <th>Updated</th>
        <th>Status</th>
    </tr>
{% for job in object_list %}
    <tr>
        <td>{{ job.id }}</td>
+       {% if "change_challenge" in challenge_perms %}
+           <td>
+               {{ job.submission.creator|user_profile_link }}
+               {% if site.evaluation_config.use_teams %}
+                   {% with job.result|get_team_html as team_html %}
+                       {% if team_html %}
+                           ({{ team_html }})
+                       {% endif %}
+                   {% endwith %}
+               {% endif %}
+           </td>
+       {% endif %}
        <td>{{ job.created }}</td>
        <td>{{ job.modified }}</td>
        <td>
\n"}}},{"rowIdx":1111,"cells":{"message":{"kind":"string","value":"add stdout as an output format for report subcommand\nUsing --format=stdout now writes output to STDOUT in human-readable\nform, in addition to tabular/Excel/etc."},"diff":{"kind":"string","value":"@@ -41,6 +41,7 @@ THE SOFTWARE.\n\"\"\"\nimport os\n+import sys\nimport numpy as np\nimport pandas as pd\n@@ -123,6 +124,13 @@ def write_styled_html(path, df, index=None):\nofh.write(html)\n+# Write a dataframe to STDOUT\n+def write_to_stdout(stem, df, index=None, line_width=None):\n+ \"\"\"Write dataframe in tab-separated form to STDOUT.\"\"\"\n+ sys.stdout.write(\"TABLE: %s\\n\" % stem)\n+ sys.stdout.write(df.to_string(index=index, line_width=line_width) + '\\n\\n')\n+\n+\n# Write a table returned from the pyani database in the requested format\ndef write_dbtable(data, headers, path=None, formats=('tab',), index=False):\n\"\"\"Write database result table to output file in named format.\"\"\"\n@@ -131,7 +139,9 @@ def write_dbtable(data, headers, path=None, formats=('tab',), index=False):\nformatdict = {'tab': (df.to_csv, {'sep': '\\t', 'index': False}, '.tab'),\n'excel': (df.to_excel, {'index': False}, '.xlsx'),\n'html': (write_styled_html, {'df': df, 'index': index},\n- '.html')}\n+ '.html'),\n+ 'stdout': (write_to_stdout, {'df': df, 'index': False}, '')\n+ }\nfor format in formats:\nfunc, args, ext = formatdict[format]\nofname = path + ext\n"}}},{"rowIdx":1112,"cells":{"message":{"kind":"string","value":"chore: correct region tag in submit_job_to_cluster.py\nChange region tag to make it unique. The previous tag was used in another create cluster file and caused problems with automation tools."},"diff":{"kind":"string","value":"@@ -85,7 +85,7 @@ def download_output(project, cluster_id, output_bucket, job_id):\nreturn bucket.blob(output_blob).download_as_string()\n-# [START dataproc_create_cluster]\n+# [START dataproc_submit_job_create_cluster]\ndef create_cluster(dataproc, project, zone, region, cluster_name):\n\"\"\"Create the cluster.\"\"\"\nprint(\"Creating cluster...\")\n@@ -110,7 +110,7 @@ def create_cluster(dataproc, project, zone, region, cluster_name):\nwaiting_callback = True\n-# [END dataproc_create_cluster]\n+# [END dataproc_submit_job_create_cluster]\ndef callback(operation_future):\n"}}},{"rowIdx":1113,"cells":{"message":{"kind":"string","value":"wallet.get_request_by_addr: make deterministic\nThis makes test_invoices/test_wallet_get_request_by_addr pass without flakyness.\ncloses"},"diff":{"kind":"string","value":"@@ -2355,8 +2355,13 @@ class Abstract_Wallet(ABC, Logger, EventListener):\nif not req.is_lightning() or self.lnworker.get_invoice_status(req) == PR_UNPAID]\nif not reqs:\nreturn None\n- # note: there typically should not be more than one relevant request for an address\n- return reqs[0]\n+ # note: There typically should not be more than one relevant request for an address.\n+ # If there's multiple, return the one created last (see #8113). Consider:\n+ # - there is an old expired req1, and a newer unpaid req2, reusing the same addr (and same amount),\n+ # - now req2 gets paid. however, get_invoice_status will say both req1 and req2 are PAID. 
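# (editor's illustrative aside, not part of the patch: e.g. req1 created
#  at t=100 and long expired, req2 created at t=200 and just paid --
#  sorting by get_time() and taking the last element returns req2.)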
(see #8061)\n+ # - as a workaround, we return the request with the larger creation time.\n+ reqs.sort(key=lambda req: req.get_time())\n+ return reqs[-1]\ndef get_request(self, request_id: str) -> Optional[Invoice]:\nreturn self._receive_requests.get(request_id)\n"}}},{"rowIdx":1114,"cells":{"message":{"kind":"string","value":"Update\nUpdate desc"},"diff":{"kind":"string","value":"no_log_contains: id \"942190\"\n-\ntest_title: 942190-40\n- desc: \"MSSQL Logical Functions - IIF (Transact-SQL)\"\n+ desc: \"MSSQL Logical Functions - IIF (Transact-SQL) - regression test\"\nstages:\n-\nstage:\n"}}},{"rowIdx":1115,"cells":{"message":{"kind":"string","value":"missing pipe\n[nodeploy]"},"diff":{"kind":"string","value":"@@ -19,7 +19,7 @@ fi\necho \"Starting devserver in new tmux session...\"\ntmux new-session -d -s $session\ntmux new-window -t \"$session:1\" -n gae \"dev_appserver.py --admin_host=0.0.0.0 --host=0.0.0.0 --datastore_path=/datastore/tba.db src/default.yaml src/web.yaml src/api.yaml src/dispatch.yaml 2>&1 | tee /var/log/tba.log; read\"\n-tmux new-window -t \"$session:2\" -n gulp \"gulp 2>&1 tee /var/log/gulp.log; read\"\n+tmux new-window -t \"$session:2\" -n gulp \"gulp 2>&1 | tee /var/log/gulp.log; read\"\nif [ ! -z \"$instance_name\" ]; then\necho \"Starting Cloud SQL proxy to connect to $instance_name\"\ntmux new-window -t \"$session:3\" -n sql \"/cloud_sql_proxy -instances=$instance_name=tcp:3306 -credential_file=$auth_path | tee /var/log/sql.log; read\"\n"}}},{"rowIdx":1116,"cells":{"message":{"kind":"string","value":"Changes default \"onBadFit\" option to *nothing* (not even Robust+).\nThis update to the default behavior of do_long_sequence_gst when\na model doesn't fit the data is more conservative -- only do the\nspecial Robust+ or wildcard post-processing analysis when the user\nspecificially requests it."},"diff":{"kind":"string","value":"@@ -1329,7 +1329,7 @@ def _post_opt_processing(callerName, ds, target_model, mdl_start, lsgstLists,\nobjective = advancedOptions.get('objective', 'logl')\nbadFitThreshold = advancedOptions.get('badFitThreshold',DEFAULT_BAD_FIT_THRESHOLD)\nif ret.estimates[estlbl].misfit_sigma(evaltree_cache=evaltree_cache, comm=comm) > badFitThreshold:\n- onBadFit = advancedOptions.get('onBadFit',[\"wildcard\"]) #[\"Robust+\"]) # empty list => 'do nothing'\n+ onBadFit = advancedOptions.get('onBadFit',[]) #[\"wildcard\"]) #[\"Robust+\"]) # empty list => 'do nothing'\nif len(onBadFit) > 0 and parameters.get('weights',None) is None:\n"}}},{"rowIdx":1117,"cells":{"message":{"kind":"string","value":"Python API: override __nonzero__ for node wrappers\nTN:"},"diff":{"kind":"string","value":"@@ -790,6 +790,16 @@ class ${root_astnode_name}(object):\nctypes.byref(result))\nreturn ${root_astnode_name}._wrap(result)\n+ def __nonzero__(self):\n+ \"\"\"\n+ Return always True so that checking a node against None can be done as\n+ simply as::\n+\n+ if node:\n+ ...\n+ \"\"\"\n+ return True\n+\ndef __len__(self):\n\"\"\"Return the number of ${root_astnode_name} children this node has.\"\"\"\nnode = self._unwrap(self)\n"}}},{"rowIdx":1118,"cells":{"message":{"kind":"string","value":"container-common: Enable docker on boot for ubuntu\ndocker daemon is automatically started during package installation\nbut the service isn't enabled on boot."},"diff":{"kind":"string","value":"tags:\nwith_pkg\n- - name: start docker service\n- service:\n- name: docker\n- state: started\n- enabled: yes\n- tags:\n- with_pkg\n-\n- name: red hat 8 based systems tasks\nwhen:\n- 
ansible_distribution_major_version == '8'\ntags:\nwith_pkg\n+- name: start docker service\n+ service:\n+ name: docker\n+ state: started\n+ enabled: yes\n+ tags:\n+ with_pkg\n+ when: not (ansible_os_family == 'RedHat' and\n+ ansible_distribution_major_version == '8')\n+\n- name: ensure tmpfiles.d is present\nlineinfile:\npath: /etc/tmpfiles.d/ceph-common.conf\n"}}},{"rowIdx":1119,"cells":{"message":{"kind":"string","value":"Improve sentence parsing\nI've always parsed this sentence as \"attrs comes with serious, business aliases\". I just realized you probably meant srs bzns aliases and figured I'd clarify."},"diff":{"kind":"string","value":"@@ -48,7 +48,7 @@ By default, all features are added, so you immediately have a fully functional d\nAs shown, the generated ``__init__`` method allows for both positional and keyword arguments.\n-If playful naming turns you off, ``attrs`` comes with serious business aliases:\n+If playful naming turns you off, ``attrs`` comes with serious-business aliases:\n.. doctest::\n"}}},{"rowIdx":1120,"cells":{"message":{"kind":"string","value":"Setup (Windows): Query inkscape install location correctly\nMSI installer writes install location in key\nHKLM\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\inkscape.exe"},"diff":{"kind":"string","value":"@@ -117,14 +117,14 @@ goto FINAL\n:DETECT_INKSCAPE_LOCATION\necho Trying to find Inkscape in Windows Registry...\n+rem Checking NSIS-Installer registry information\nrem Inkscape installation path is usually found in the registry\n-rem \"SOFTWARE\\Inkscape\\Inkscape\"\n-rem under HKLM (Local Machine -> machine wide installation) or\n-rem HKCU (Current User -> user installation)\n+rem \"SOFTWARE\\Inkscape\\Inkscape\" under HKLM (Local Machine ->\n+rem machine wide installation) or rem HKCU (Current User ->\n+rem user installation) if installed via NSIS exe installer.\nrem We also have to keep in mind that the values might be in the 32bit or 64bit\nrem version of the registry (i.e., under SOFTWARE\\WOW6432Node\\Inkscape\\Inkscape\nrem or SOFTWARE\\Inkscape\\Inkscape)\n-rem This holds if Inkscape has been installed via via NSIS, not via MSI\nfor %%R in (HKLM HKCU) do (\nfor %%T in (32 64) do (\nrem Output of REG QUERY \"KeyName\" /ve is (first line is a blank line):\n@@ -136,7 +136,7 @@ for %%R in (HKLM HKCU) do (\nrem so we skip the first two lines (skip=2) and then we take the second token\nrem and the reamining output (tokens=2*), so %%A is REG_SZ and %%B is the path\nrem even if it contains spaces (tokens are delimited by spaces)\n- echo Trying registry root %%R [%%T]...\n+ echo Trying SOFTWARE\\Inkscape\\Inkscape in registry root %%R [%%T]...\nfor /f \"usebackq skip=2 tokens=2*\" %%A in (`REG QUERY \"%%R\\SOFTWARE\\Inkscape\\Inkscape\" /ve /reg:%%T 2^>nul`) do (\nif exist %%B (\nset INKSCAPE_DIR=%%B\n@@ -157,6 +157,33 @@ for %%R in (HKLM HKCU) do (\n)\n)\n+\n+rem Checking MSI-Installer registry information\n+rem Inkscape installation path is usually found in the registry\n+rem under key \"Path\" in\n+rem SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\inkscape.exe\n+rem if installed via msi installer\n+for %%T in (32 64) do (\n+ echo Trying SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\inkscape.exe in registry root HKLM [%%T]...\n+ for /f \"usebackq skip=2 tokens=2*\" %%A in (`REG QUERY \"HKLM\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\inkscape.exe\" /v Path /reg:%%T 2^>nul`) do (\n+ if exist %%B (\n+ set INKSCAPE_DIR=%%B\n+ )\n+ )\n+ if defined INKSCAPE_DIR (\n+ echo 
Inkscape considered to be installed in !INKSCAPE_DIR!\n+ echo Setting executable path to !INKSCAPE_DIR!\n+ if exist \"!INKSCAPE_DIR!\\!INKSCAPE_EXENAME!\" (\n+ echo !INKSCAPE_DIR!\\!INKSCAPE_EXENAME! found\n+ echo.\n+ goto INKSCAPE_FOUND\n+ ) else (\n+ echo !INKSCAPE_DIR!\\!INKSCAPE_EXENAME! not found\n+ )\n+ )\n+)\n+\n+\nrem If we did non succeed in the registry lets have a look\nrem at the most common install locations\necho Trying the usual Windows install locations...\n"}}},{"rowIdx":1121,"cells":{"message":{"kind":"string","value":"Quick syntax correction for clarity\n`''.join(srcCode)` is not really readable. On the other hand, `str.join('', srcCode)` is much better."},"diff":{"kind":"string","value":"@@ -31,7 +31,7 @@ for file in allFiles:\nsrcCode = f.readlines()\n# The last three lines are always the main() call\nsrcCode = srcCode[:-3]\n- srcCode = ''.join(srcCode)\n+ srcCode = str.join('', srcCode)\nmodule.__SRC_CODE = srcCode\nbmpSrcCode = highlight(srcCode, PythonLexer(), BmpImageFormatter())\n"}}},{"rowIdx":1122,"cells":{"message":{"kind":"string","value":"Update language ID map\nhtml: add text.html.ngx for angular files\nshaderlab: these are supposedly Unity Shaderlab files\nr: the R language server can also handle R-flavoured markdown files\nxsl and xml: decouple them\nIn general, added repo links to thirdparty syntaxes"},"diff":{"kind":"string","value":"\"bibtex\": \"text.bibtex\",\n\"cpp\": \"source.c++\",\n\"csharp\": \"source.cs\",\n- \"html\": \"embedding.php | text.html.basic\",\n+ \"html\": \"embedding.php | text.html.basic | text.html.ngx\", // https://github.com/princemaple/ngx-html-syntax\n\"javascript\": \"source.js\",\n- \"javascriptreact\": \"source.jsx\", // 3rdparty\n+ \"javascriptreact\": \"source.jsx\",\n\"jsonc\": \"source.json\",\n\"latex\": \"text.tex.latex\",\n\"markdown\": \"text.html.markdown\",\n\"objective-c\": \"source.objc\",\n\"objective-cpp\": \"source.objc++\",\n\"php\": \"source.php | embedding.php\",\n+ \"r\": \"source.r | text.html.markdown.rmarkdown\", // https://github.com/REditorSupport/sublime-ide-r\n\"ruby\": \"text.html.ruby | source.ruby\",\n- \"shaderlab\": \"source.glsl | source.essl\", // 3rdparty\n+ \"shaderlab\": \"source.unity_shader | source.shader\", // https://github.com/petereichinger/Unity3D-Shader, https://github.com/waqiju/unity_shader_st3\n\"shellscript\": \"source.shell.bash\",\n\"typescript\": \"source.ts\",\n\"typescriptreact\": \"source.tsx\",\n\"txt\": \"text.plain\",\n- \"vue\": \"text.html.vue\", // 3rdparty\n- \"xml\": \"text.xml\",\n- \"xsl\": \"text.xml\", // 3rdparty\n+ \"vue\": \"text.html.vue\", // https://github.com/vuejs/vue-syntax-highlight\n+ \"xml\": \"text.xml - text.xml.xsl\",\n+ \"xsl\": \"text.xml.xsl\", // https://github.com/packagecontrol/XSL\n}\n"}}},{"rowIdx":1123,"cells":{"message":{"kind":"string","value":"boot: Remove special case for pypy load failures\nThere was a special case for Pypy in the handling of errors when loading\ncomponents. It looks like in the years since it was written, it may\nhave become unnecessary. Removing it leads to more helpful error\nmessages, so... 
let's remove it?"},"diff":{"kind":"string","value":"@@ -104,9 +104,7 @@ def _do_import (name):\nmessage = str(sys.exc_info()[1].args[0])\ns = message.rsplit(\" \", 1)\n- # Sadly, PyPy isn't consistent with CPython here.\n- #TODO: Check on this behavior in pypy 2.0.\n- if s[0] == \"No module named\" and (name.endswith(s[1]) or __pypy__):\n+ if s[0] == \"No module named\" and (name.endswith(s[1])):\n# It was the one we tried to import itself. (Case 1)\n# If we have other names to try, try them!\nreturn do_import2(base_name, names_to_try)\n"}}},{"rowIdx":1124,"cells":{"message":{"kind":"string","value":"fix bug wth --runtest where software or system packages were not showing due to directory error.\nThe if conditions in eb_menu were not setup properly."},"diff":{"kind":"string","value":"@@ -32,7 +32,7 @@ import subprocess\nimport time\nimport glob\n-from buildtest.tools.config import BUILDTEST_ROOT\n+from buildtest.tools.config import BUILDTEST_ROOT, config_opts\nfrom buildtest.tools.menu import buildtest_menu\ndef systempkg_menu(systempkg):\n@@ -199,6 +199,8 @@ def eb_menu(ebpkg):\napp_tc_set = set()\n+\n+\n# translate directory path into app name/version and toolchain name/version\nfor item in testroot_set:\n# directory format $BUILDTEST_TESTDIR/ebapps/software/version, ebapp only 2 directories up\n@@ -224,8 +226,7 @@ def eb_menu(ebpkg):\napp_tc_set.add(app_ver+\",\"+tcname_tcver)\n# directory format $BUILDTEST_TESTDIR/ebapps/software/version/package, ebapp only 3 directories up\n-\n- if os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) == \"ebapp\":\n+ elif os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) == \"ebapp\":\napp = os.path.basename(os.path.dirname(os.path.dirname(item)))\nver = os.path.basename(os.path.dirname(item))\n@@ -235,7 +236,7 @@ def eb_menu(ebpkg):\n# directory format $BUILDTEST_TESTDIR/ebapps/software/version/toolchainname/toolchainver/package, ebapp only 5 directories up\n- if os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))) == \"ebapp\":\n+ elif os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))) == \"ebapp\":\napp = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))\nver = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item))))\n@@ -438,11 +439,8 @@ def runtest_menu():\nos.system(\"clear\")\n- cwd = BUILDTEST_ROOT\n- testing = os.path.join(cwd,\"testing\")\n- systempkg = os.path.join(testing,\"system\")\n- ebpkg = os.path.join(testing,\"ebapp\")\n-\n+ system_testdir = os.path.join(config_opts['BUILDTEST_TESTDIR'],\"system\")\n+ software_testdir = os.path.join(config_opts['BUILDTEST_TESTDIR'],\"ebapp\")\ntext = \"\"\"\n_________________________________________________________________________\n|\\ /|\n@@ -491,8 +489,8 @@ def runtest_menu():\n# force userinput to be integer in case its float or something else\nuserinput = int(userinput)\nif userinput == 1:\n- systempkg_menu(systempkg)\n+ systempkg_menu(system_testdir)\nelif userinput == 2:\n- eb_menu(ebpkg)\n+ eb_menu(software_testdir)\nelse:\nprint \"Invalid Entry, please try again\"\n"}}},{"rowIdx":1125,"cells":{"message":{"kind":"string","value":"Fixes an \"*\" import in the middle of the code.\nImporting everything without namespace is a bad practice.\nDoing it outside module level is currently forbidden.\nPython 3.9.1 refuses to compile it.\nFlake8 reports: F406 'from kicost.kicost_gui import *' 
only allowed\nat module level."},"diff":{"kind":"string","value":"@@ -63,7 +63,7 @@ class kicost_kicadplugin(ActionPlugin):\nbom_file = ''\ntry:\ntry:\n- from kicost.kicost_gui import *\n+ from kicost.kicost_gui import kicost_gui\nkicost_gui(bom_file) # If KiCad and KiCost share the same Python installation.\nexcept ImportError:\nsubprocess.call(('kicost', '--guide', bom_file), shell=True)\n"}}},{"rowIdx":1126,"cells":{"message":{"kind":"string","value":"Update train.py\nremove redundant code"},"diff":{"kind":"string","value":"@@ -98,7 +98,6 @@ def main(train_data_file, test_data_file, vocab_file, target_file, emb_file,\nfor pass_id in xrange(num_passes):\nchunk_evaluator.reset(exe)\nfor data in train_reader():\n- print len(data)\ncost, batch_precision, batch_recall, batch_f1_score = exe.run(\nfluid.default_main_program(),\nfeed=feeder.feed(data),\n"}}},{"rowIdx":1127,"cells":{"message":{"kind":"string","value":"Split the empty cluster case from normal case\nCover normal usage of get_brokers function"},"diff":{"kind":"string","value":"@@ -339,6 +339,8 @@ class TestZK(object):\n}\nassert actual_without_fetch_state == expected_without_fetch_state\n+ def test_get_topics_empty_cluster(self, mock_client):\n+ with ZK(self.cluster_config) as zk:\nzk.get_children = mock.Mock(side_effect=NoNodeError())\nactual_with_no_node_error = zk.get_topics()\nexpected_with_no_node_error = {}\n@@ -346,6 +348,29 @@ class TestZK(object):\nassert actual_with_no_node_error == expected_with_no_node_error\ndef test_get_brokers(self, mock_client):\n+ with ZK(self.cluster_config) as zk:\n+ zk.get_children = mock.Mock(\n+ return_value=[1, 2, 3],\n+ )\n+ expected = {1: None, 2: None, 3: None}\n+ actual = zk.get_brokers(names_only=True)\n+ zk.get_children.assert_called_with(\"/brokers/ids\")\n+ assert actual == expected\n+\n+ zk.get_children = mock.Mock(\n+ return_value=[1, 2, 3],\n+ )\n+ zk.get_broker_metadata = mock.Mock(\n+ return_value='broker',\n+ )\n+ expected = {1: 'broker', 2: 'broker', 3: 'broker'}\n+ actual = zk.get_brokers()\n+ zk.get_children.assert_called_with(\"/brokers/ids\")\n+ calls = zk.get_broker_metadata.mock_calls\n+ zk.get_broker_metadata.assert_has_calls(calls)\n+ assert actual == expected\n+\n+ def test_get_brokers_empty_cluster(self, mock_client):\nwith ZK(self.cluster_config) as zk:\nzk.get_children = mock.Mock(side_effect=NoNodeError())\nactual_with_no_node_error = zk.get_brokers()\n"}}},{"rowIdx":1128,"cells":{"message":{"kind":"string","value":"bootstrap_javascript use settings include_jquery\ntnx"},"diff":{"kind":"string","value":"@@ -282,7 +282,7 @@ def bootstrap_jquery(jquery='full'):\n@register.simple_tag\n-def bootstrap_javascript(jquery='falsy'):\n+def bootstrap_javascript(jquery=None):\n\"\"\"\nReturn HTML for Bootstrap JavaScript.\n@@ -315,7 +315,7 @@ def bootstrap_javascript(jquery='falsy'):\njavascript_tags = []\n# Set jquery value from setting or leave default.\n- jquery = get_bootstrap_setting('include_jquery') or jquery\n+ jquery = jquery or get_bootstrap_setting('include_jquery', 'falsy')\n# Include jQuery if the option is passed\nif jquery != 'falsy':\n"}}},{"rowIdx":1129,"cells":{"message":{"kind":"string","value":"tests/state_docs: clear registry before running the test.\nMake sure docs examples get consistent naming"},"diff":{"kind":"string","value":"@@ -3,6 +3,18 @@ import pytest\nimport psyneulink as pnl\nimport doctest\n+def clear_registry():\n+ from psyneulink.components.component import DeferredInitRegistry\n+ from psyneulink.components.system import SystemRegistry\n+ 
from psyneulink.components.process import ProcessRegistry\n+ from psyneulink.components.mechanisms.mechanism import MechanismRegistry\n+ from psyneulink.components.projections.projection import ProjectionRegistry\n+ # Clear Registry to have a stable reference for indexed suffixes of default names\n+ pnl.clear_registry(DeferredInitRegistry)\n+ pnl.clear_registry(SystemRegistry)\n+ pnl.clear_registry(ProcessRegistry)\n+ pnl.clear_registry(MechanismRegistry)\n+ pnl.clear_registry(ProjectionRegistry)\ndef test_state_docs():\n# get examples of mechanisms that can be used with GatingSignals/Mechanisms\n@@ -10,7 +22,8 @@ def test_state_docs():\ndef test_parameter_state_docs():\n- fail, total = doctest.testmod(pnl.components.states.parameterstate)\n+ clear_registry()\n+ fail, total = doctest.testmod(pnl.components.states.parameterstate, globs={})\nif fail > 0:\npytest.fail(\"{} out of {} examples failed\".format(fail, total),\n"}}},{"rowIdx":1130,"cells":{"message":{"kind":"string","value":"Fix when filter working on POST\nHG--\nbranch : feature/microservices"},"diff":{"kind":"string","value":"@@ -65,6 +65,8 @@ class ExtFormatMiddleware(object):\ndef process_request(self, request):\nif request.GET and request.GET.get(\"__format\") == \"ext\":\nrequest.is_extjs = True\n+ elif request.POST and request.POST.get(\"__format\") == \"ext\":\n+ request.is_extjs = True\nelse:\nrequest.is_extjs = False\n"}}},{"rowIdx":1131,"cells":{"message":{"kind":"string","value":"use addClassResourceCleanup in test_roles\nReplaces resource_cleanup with addClassResourceCleanup in\ntest_roles.\ntest_utils.call_and_ignore_notfound_exc is NOT used in resource_setup\nas delete_role_from_user_on_project and similar calls, do not delete\nthe role, it just unassigns the role from the target."},"diff":{"kind":"string","value":"@@ -32,6 +32,8 @@ class RolesV3TestJSON(base.BaseIdentityV3AdminTest):\nfor _ in range(3):\nrole_name = data_utils.rand_name(name='role')\nrole = cls.roles_client.create_role(name=role_name)['role']\n+ cls.addClassResourceCleanup(cls.roles_client.delete_role,\n+ role['id'])\ncls.roles.append(role)\nu_name = data_utils.rand_name('user')\nu_desc = '%s description' % u_name\n@@ -42,25 +44,23 @@ class RolesV3TestJSON(base.BaseIdentityV3AdminTest):\ndata_utils.rand_name('project'),\ndescription=data_utils.rand_name('project-desc'),\ndomain_id=cls.domain['id'])['project']\n+ cls.addClassResourceCleanup(cls.projects_client.delete_project,\n+ cls.project['id'])\ncls.group_body = cls.groups_client.create_group(\nname=data_utils.rand_name('Group'), project_id=cls.project['id'],\ndomain_id=cls.domain['id'])['group']\n+ cls.addClassResourceCleanup(cls.groups_client.delete_group,\n+ cls.group_body['id'])\ncls.user_body = cls.users_client.create_user(\nname=u_name, description=u_desc, password=cls.u_password,\nemail=u_email, project_id=cls.project['id'],\ndomain_id=cls.domain['id'])['user']\n+ cls.addClassResourceCleanup(cls.users_client.delete_user,\n+ cls.user_body['id'])\ncls.role = cls.roles_client.create_role(\nname=data_utils.rand_name('Role'))['role']\n-\n- @classmethod\n- def resource_cleanup(cls):\n- cls.roles_client.delete_role(cls.role['id'])\n- cls.groups_client.delete_group(cls.group_body['id'])\n- cls.users_client.delete_user(cls.user_body['id'])\n- cls.projects_client.delete_project(cls.project['id'])\n- for role in cls.roles:\n- cls.roles_client.delete_role(role['id'])\n- super(RolesV3TestJSON, cls).resource_cleanup()\n+ cls.addClassResourceCleanup(cls.roles_client.delete_role,\n+ 
cls.role['id'])\n@decorators.attr(type='smoke')\n@decorators.idempotent_id('18afc6c0-46cf-4911-824e-9989cc056c3a')\n"}}},{"rowIdx":1132,"cells":{"message":{"kind":"string","value":"Add build status to README\nLibrary can now successfully do nothing"},"diff":{"kind":"string","value":"# Manim - Mathematical Animation Engine\n[](https://manim.readthedocs.io/en/latest/?badge=latest)\n+[](https://travis-ci.org/3b1b/manim)\n[](http://choosealicense.com/licenses/mit/)\nManim is an animation engine for explanatory math videos. It's used to create precise animations programmatically, as seen in the videos at [3Blue1Brown](https://www.3blue1brown.com/).\n"}}},{"rowIdx":1133,"cells":{"message":{"kind":"string","value":"fixed duplication of representations\nnuke.api.plugin.ExporterReview adds representation explicitly via publish_on_farm, so skip adding repre if already there. (Issue in ExtractBurnin other way.)\nExporterReview should be probably refactored and publish_on_farm removed altogether."},"diff":{"kind":"string","value":"@@ -601,7 +601,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):\n\"files\": os.path.basename(remainder),\n\"stagingDir\": os.path.dirname(remainder),\n}\n- representations.append(rep)\nif \"render\" in instance.get(\"families\"):\nrep.update({\n\"fps\": instance.get(\"fps\"),\n@@ -609,6 +608,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):\n})\nself._solve_families(instance, True)\n+ already_there = False\n+ for repre in instance.get(\"representations\", []):\n+ # might be added explicitly before by publish_on_farm\n+ already_there = repre.get(\"files\") == rep[\"files\"]\n+ if already_there:\n+ break\n+ self.log.debug(\"repre {} already_there\".format(repre))\n+ if not already_there:\n+ representations.append(rep)\n+\nreturn representations\ndef _solve_families(self, instance, preview=False):\n"}}},{"rowIdx":1134,"cells":{"message":{"kind":"string","value":"Add more description to policies in the keypairs.py\nThis updates the policy doc for server extend controller in keypairs.py\nPartial implement blueprint blueprint policy-docs"},"diff":{"kind":"string","value":"# License for the specific language governing permissions and limitations\n# under the License.\n-from oslo_policy import policy\n-\nfrom nova.policies import base\n@@ -63,9 +61,20 @@ keypairs_policies = [\n'method': 'GET'\n}\n]),\n- policy.RuleDefault(\n- name=BASE_POLICY_NAME,\n- check_str=base.RULE_ADMIN_OR_OWNER),\n+ base.create_rule_default(\n+ BASE_POLICY_NAME,\n+ base.RULE_ADMIN_OR_OWNER,\n+ \"Return 'key_name' in the response of server.\",\n+ [\n+ {\n+ 'path': '/servers/{id}',\n+ 'method': 'GET',\n+ },\n+ {\n+ 'path': '/servers/detail',\n+ 'method': 'GET'\n+ }\n+ ]),\n]\n"}}},{"rowIdx":1135,"cells":{"message":{"kind":"string","value":"Fixed _custom_opac flag\nIf we specify opacity for every point, then we should set _custom_opac to true."},"diff":{"kind":"string","value":"@@ -1484,7 +1484,7 @@ class BasePlotter(PickingHelper, WidgetHelper):\nopacity = np.array(opacity)\nif scalars.shape[0] == opacity.shape[0]:\n# User could pass an array of opacities for every point/cell\n- pass\n+ _custom_opac = True\nelse:\nopacity = opacity_transfer_function(opacity, n_colors)\n"}}},{"rowIdx":1136,"cells":{"message":{"kind":"string","value":"Add warning about mounting relative paths\nand minor tweaks"},"diff":{"kind":"string","value":"@@ -72,11 +72,16 @@ Running mriqc\nautomatically be executed without need of running the command in item 3.\n+.. 
warning::\n+\n+ Paths `` and `` must be absolute. In particular, specifying relative paths for\n+ `` will generate no error and mriqc will run to completion without error but produce no output.\n+\n.. warning::\nFor security reasons, we recommend to run the docker command with the options\n``--read-only --tmpfs /run --tmpfs /tmp``. This will run the docker image in\n- read-only mode, and map the temporal folders ``/run`` and ``/tmp`` to the temporal\n+ read-only mode, and map the temporary folders ``/run`` and ``/tmp`` to the temporal\nfolder of the host.\n@@ -87,7 +92,7 @@ Explaining the mriqc-docker command line\nLet's dissect this command line:\n-+ :code:`docker run`- instructs the docker engine to get and run certain\n++ :code:`docker run`- instructs the docker engine to get and run a certain\nimage (which is the last of docker-related arguments:\n:code:`poldracklab/mriqc:latest`)\n+ :code:`-v :/data:ro` - instructs docker to mount the local\n"}}},{"rowIdx":1137,"cells":{"message":{"kind":"string","value":"Cast regularization parameters to float.\nThis works around a bug in earlier proto versions\nthat automatically infer these values to be integer\ninstead of float."},"diff":{"kind":"string","value":"@@ -111,9 +111,9 @@ def _build_regularizer(regularizer):\n\"\"\"\nregularizer_oneof = regularizer.WhichOneof('regularizer_oneof')\nif regularizer_oneof == 'l1_regularizer':\n- return slim.l1_regularizer(scale=regularizer.l1_regularizer.weight)\n+ return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))\nif regularizer_oneof == 'l2_regularizer':\n- return slim.l2_regularizer(scale=regularizer.l2_regularizer.weight)\n+ return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))\nraise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))\n"}}},{"rowIdx":1138,"cells":{"message":{"kind":"string","value":"DOC: Update TESTS.rst to use the correct names\nNot actually sure that setup_module() is what was wanted here, but\nit works?\nMention a bit more about actual pytest fixtures."},"diff":{"kind":"string","value":"@@ -178,30 +178,33 @@ Similarly for methods::\nEasier setup and teardown functions / methods\n---------------------------------------------\n-Testing looks for module-level or class-level setup and teardown functions by\n-name; thus::\n+Testing looks for module-level or class method-level setup and teardown\n+functions by name; thus::\n- def setup():\n+ def setup_module():\n\"\"\"Module-level setup\"\"\"\nprint('doing setup')\n- def teardown():\n+ def teardown_module():\n\"\"\"Module-level teardown\"\"\"\nprint('doing teardown')\nclass TestMe:\n- def setup():\n+ def setup_method(self):\n\"\"\"Class-level setup\"\"\"\nprint('doing setup')\n- def teardown():\n+ def teardown_method():\n\"\"\"Class-level teardown\"\"\"\nprint('doing teardown')\nSetup and teardown functions to functions and methods are known as \"fixtures\",\n-and their use is not encouraged.\n+and they should be used sparingly.\n+``pytest`` supports more general fixture at various scopes which may be used\n+automatically via special arguments. For example, the special argument name\n+``tmpdir`` is used in test to create a temporary directory.\nParametric tests\n----------------\n"}}},{"rowIdx":1139,"cells":{"message":{"kind":"string","value":"Optimize mesh export using np.fromiter.\nMade optimization of mesh export using np.fromiter() instead of creating creating python lists of mesh data. 
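(Editor's aside: a small self-contained sketch of why `np.fromiter()` over a generator beats building intermediate Python lists; the coordinate data below is generic, not the actual Blender mesh structures. The author's own timings follow.)

```python
import numpy as np

# Generic stand-in data; the real code iterates Blender mesh vertices.
coords = [(float(i), float(i + 1), float(i + 2)) for i in range(100_000)]

# Old pattern: materialize a temporary list of Python tuples first.
a = np.array([c for c in coords], dtype=np.float32)

# New pattern: np.fromiter() streams scalars straight into the typed
# buffer, skipping the intermediate list of Python objects.
b = np.fromiter((x for c in coords for x in c),
                dtype=np.float32).reshape((len(coords), 3))

assert np.array_equal(a, b)
```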
On my tests it speedups export process more than 2 times, on some scenes ore than 4 times."},"diff":{"kind":"string","value":"@@ -43,24 +43,26 @@ class MeshData:\nif tris_len == 0:\nraise SyncError(\"Mesh %s has no polygons\" % mesh.name, mesh)\n- data.vertices = np.array([vert.co for vert in mesh.vertices], dtype=np.float32)\n- data.normals = np.array(\n- [norm for tri in mesh.loop_triangles\n- for norm in tri.split_normals],\n- dtype=np.float32\n- )\n+ data.vertices = np.fromiter(\n+ (x for vert in mesh.vertices for x in vert.co),\n+ dtype=np.float32).reshape((len(mesh.vertices), 3))\n+ data.normals = np.fromiter(\n+ (x for tri in mesh.loop_triangles for norm in tri.split_normals for x in norm),\n+ dtype=np.float32).reshape((tris_len * 3, 3))\ndata.uvs = None\ndata.uv_indices = None\nif len(mesh.uv_layers) > 0:\nuv_layer = mesh.uv_layers.active\n- uvs = np.array([[d.uv.x, d.uv.y] for d in uv_layer.data], dtype=np.float32)\n+ uvs = np.fromiter(\n+ (x for d in uv_layer.data for x in d.uv),\n+ dtype=np.float32).reshape((len(uv_layer.data), 2))\nif len(uvs) > 0:\ndata.uvs = uvs\n- data.uv_indices = np.array([tri.loops for tri in mesh.loop_triangles], dtype=np.int32).reshape((tris_len * 3,))\n+ data.uv_indices = np.fromiter((x for tri in mesh.loop_triangles for x in tri.loops), dtype=np.int32)\ndata.num_face_vertices = np.full((tris_len,), 3, dtype=np.int32)\n- data.vertex_indices = np.array([tri.vertices for tri in mesh.loop_triangles], dtype=np.int32).reshape((tris_len * 3,))\n+ data.vertex_indices = np.fromiter((x for tri in mesh.loop_triangles for x in tri.vertices), dtype=np.int32)\ndata.normal_indices = np.arange(tris_len * 3, dtype=np.int32)\nif calc_area:\n"}}},{"rowIdx":1140,"cells":{"message":{"kind":"string","value":"Add ContainerImagePrepare service to CellController role\nThe CellController role does not have ContainerImagePrepare\nservice. 
This result in empty external_deploy_steps_tasks.yaml\nand does not update container images when trying to update\nthe cell stack.\nCloses-Bug:"},"diff":{"kind":"string","value":"- OS::TripleO::Services::CertmongerUser\n- OS::TripleO::Services::Clustercheck\n- OS::TripleO::Services::Collectd\n+ - OS::TripleO::Services::ContainerImagePrepare\n- OS::TripleO::Services::Docker\n- OS::TripleO::Services::Fluentd\n- OS::TripleO::Services::HAproxy\n"}}},{"rowIdx":1141,"cells":{"message":{"kind":"string","value":"Added SLIs, SLOs and Burn rate Alerts section\nSLIs, SLOs and Burn rate Alerts section documentation, need to add pictures.\n+fixed typos in slos.tf"},"diff":{"kind":"string","value":"# See the License for the specific language governing permissions and\n# limitations under the License.\n-# Create an SLO for availablity for the custom service.\n+# Create an SLO for availability for the custom service.\n# Example SLO is defined as following:\n# 90% of all non-4XX requests within the past 30 day windowed period\n# return with 200 OK status\n@@ -89,7 +89,7 @@ resource \"google_monitoring_slo\" \"custom_service_latency_slo\" {\n}\n}\n-# Create an SLO for availablity for the Istio service.\n+# Create an SLO for availability for the Istio service.\n# Example SLO is defined as following:\n# 90% of all non-4XX requests within the past 30 day windowed period\n# return with 200 OK status\n@@ -249,7 +249,7 @@ resource \"google_monitoring_slo\" \"rating_service_latency_slo\" {\n}\n}\n-# Rating data freshness SLO:\n+# Rating service's data freshness SLO:\n# during a day 99.9% of minutes have at least 1 successful recollect API call\nresource \"google_monitoring_slo\" \"rating_service_freshness_slo\" {\n# Uses ratingservice service that is automatically detected and created when the service is deployed to App Engine\n"}}},{"rowIdx":1142,"cells":{"message":{"kind":"string","value":"generate_adhoc_ssl_pair: make issuer match subject\nWith this change, the generated certificate can be trusted,\nand the following command starts working:\nopenssl s_client -showcerts -connect dev:443 -verifyCAfile dev.crt =0.5.0 # Apache-2.0\neventlet!=0.18.3,>=0.18.2 # MIT\nWebOb>=1.6.0 # MIT\ngreenlet>=0.3.2 # MIT\n-netaddr!=0.7.16,>=0.7.13 # BSD\nparamiko>=2.0 # LGPLv2.1+\npython-neutronclient>=5.1.0 # Apache-2.0\npython-glanceclient>=2.5.0 # Apache-2.0\n"}}},{"rowIdx":1148,"cells":{"message":{"kind":"string","value":"Create compilation passes for ASTNode kinds and final structs processing\nTN:"},"diff":{"kind":"string","value":"@@ -30,9 +30,7 @@ from mako.lookup import TemplateLookup\nfrom langkit import caching, names, template_utils\nfrom langkit.ada_api import AdaAPISettings\nfrom langkit.c_api import CAPISettings\n-from langkit.diagnostics import (\n- Severity, check_source_language, errors_checkpoint\n-)\n+from langkit.diagnostics import Severity, check_source_language\nimport langkit.documentation\nfrom langkit.expressions import PropertyDef\nfrom langkit.passes import (\n@@ -948,31 +946,21 @@ class CompileCtx(object):\nGlobalPass('annotate fields types',\nCompileCtx.annotate_fields_types,\ndisabled=not annotate_fields_types),\n+ GlobalPass('compute ASTNode kind constants',\n+ CompileCtx.compute_node_kind_constants),\n+\n+ # Now that all Struct subclasses referenced by the grammar have\n+ # been typed, iterate over all declared subclasses to register the\n+ # ones that are unreachable from the grammar. 
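# (editor's aside, not part of the original patch: the kind-numbering pass
#  added below is essentially
#      for i, node in enumerate(concrete_nodes, start=1):
#          kind_constants[node] = i
#  where enumerate's start=1 keeps 0 reserved as the error code.)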
TODO: this kludge\n+ # will eventually disappear as part of OC22-016.\n+ GlobalPass('add structs to context',\n+ CompileCtx.add_structs_to_context),\n+ errors_checkpoint_pass,\n)\nwith names.camel_with_underscores:\npass_manager.run(self)\n- for i, astnode in enumerate(\n- (astnode\n- for astnode in self.astnode_types\n- if not astnode.abstract),\n- # Compute kind constants for all ASTNode concrete subclasses.\n- # Start with 1: the constant 0 is reserved as an\n- # error/uninitialized code.\n- start=1\n- ):\n- self.node_kind_constants[astnode] = i\n-\n- # Now that all Struct subclasses referenced by the grammar have been\n- # typed, iterate over all declared subclasses to register the ones that\n- # are unreachable from the grammar. TODO: this kludge will eventually\n- # disappear as part of OC22-016.\n- for t in self.struct_types + self.astnode_types:\n- t.add_to_context()\n-\n- errors_checkpoint()\n-\ndef _emit(self, file_root, generate_lexer, main_source_dirs,\nmain_programs):\n\"\"\"\n@@ -1300,3 +1288,25 @@ class CompileCtx(object):\n[\"-f\", \"annotate_fields_types\",\n\"--no-diff\", \"-w\"] + list(astnodes_files)\n)\n+\n+ def compute_node_kind_constants(self):\n+ \"\"\"\n+ Compute kind constants for all ASTNode concrete subclasses.\n+ \"\"\"\n+ for i, astnode in enumerate(\n+ (astnode\n+ for astnode in self.astnode_types\n+ if not astnode.abstract),\n+ # Start with 1: the constant 0 is reserved as an\n+ # error/uninitialized code.\n+ start=1\n+ ):\n+ self.node_kind_constants[astnode] = i\n+\n+ def add_structs_to_context(self):\n+ \"\"\"\n+ Make sure all Struct subclasses (including ASTNode ones) are added to\n+ the context.\n+ \"\"\"\n+ for t in self.struct_types + self.astnode_types:\n+ t.add_to_context()\n"}}},{"rowIdx":1149,"cells":{"message":{"kind":"string","value":"Association connect should not blindly assume memberEnds\nIn the rare case memberEnd instances are missing, we should just\ndo nothing."},"diff":{"kind":"string","value":"@@ -79,13 +79,16 @@ class AssociationConnect(RelationshipConnect):\nsubject = line.subject\ndef member_ends_match(subject):\n- return (\n+ return len(subject.memberEnd) >= 2 and (\n+ (\nhead_subject is subject.memberEnd[0].type\nand tail_subject is subject.memberEnd[1].type\n- ) or (\n+ )\n+ or (\nhead_subject is subject.memberEnd[1].type\nand tail_subject is subject.memberEnd[0].type\n)\n+ )\n# First check if the right subject is already connected:\nif line.subject and member_ends_match(line.subject):\n"}}},{"rowIdx":1150,"cells":{"message":{"kind":"string","value":"Using snapshot alf/examples\nWhen playing a trained model with alf snapshot, we should also set redirect the python path to its examples directory in case some conf files have been changed."},"diff":{"kind":"string","value":"@@ -1103,8 +1103,9 @@ def get_alf_snapshot_env_vars(root_dir):\nalf_repo = os.path.join(root_dir, \"alf\")\nalf_cnest = os.path.join(alf_repo,\n\"alf/nest/cnest\") # path to archived cnest.so\n+ alf_examples = os.path.join(alf_repo, \"alf/examples\")\npython_path = os.environ.get(\"PYTHONPATH\", \"\")\n- python_path = \":\".join([alf_repo, alf_cnest, python_path])\n+ python_path = \":\".join([alf_repo, alf_cnest, alf_examples, python_path])\nenv_vars = copy.copy(os.environ)\nenv_vars.update({\"PYTHONPATH\": python_path})\nreturn env_vars\n"}}},{"rowIdx":1151,"cells":{"message":{"kind":"string","value":"Update libc.math tests\ncimport some C99 float and long double functions, and test legacy kwargs\nfor double functions."},"diff":{"kind":"string","value":"from 
libc.math cimport (M_E, M_LOG2E, M_LOG10E, M_LN2, M_LN10, M_PI, M_PI_2,\nM_PI_4, M_1_PI, M_2_PI, M_2_SQRTPI, M_SQRT2, M_SQRT1_2)\n-from libc.math cimport (acos, asin, atan, atan2, cos, sin, tan, cosh, sinh,\n- tanh, acosh, asinh, atanh, exp, log, log10, pow, sqrt)\n+from libc.math cimport (acos, asin, atan, atan2, cos, sin, sinf, sinl, tan,\n+ cosh, sinh, tanh, acosh, asinh, atanh, exp, log, log10, pow, sqrt)\ncimport libc.math as libc_math\n@@ -34,3 +34,11 @@ def test_sin(x):\n[True, True, True, True, True, True, True, True, True, True]\n\"\"\"\nreturn sin(x)\n+\n+\n+def test_sin_kwarg(x):\n+ \"\"\"\n+ >>> test_sin_kwarg(0)\n+ 0.0\n+ \"\"\"\n+ return sin(x=x)\n"}}},{"rowIdx":1152,"cells":{"message":{"kind":"string","value":"feat: archiving pipelines\n$feat: add archive jobs BE integration\n$feat: add tests for archive jobs button"},"diff":{"kind":"string","value":"@@ -3,6 +3,7 @@ from dbnd._vendor.marshmallow import fields, validate\nclass JobSchemaV2(ApiObjectSchema):\n+ id = fields.Int()\nname = fields.Str()\nuser = fields.Str()\nui_hidden = fields.Boolean()\n"}}},{"rowIdx":1153,"cells":{"message":{"kind":"string","value":"For the NotificationWithTemplateSchema exclude the scheduled_notifications so we do not query that table.\nThe scheduled_notifications is not used as of yet."},"diff":{"kind":"string","value":"@@ -449,7 +449,7 @@ class NotificationWithTemplateSchema(BaseSchema):\nclass Meta:\nmodel = models.Notification\nstrict = True\n- exclude = ('_personalisation', )\n+ exclude = ('_personalisation', 'scheduled_notification')\ntemplate = fields.Nested(\nTemplateSchema,\n"}}},{"rowIdx":1154,"cells":{"message":{"kind":"string","value":"Update facades for 2.9 release\nThe following updates the facades to prevent spurious warnings about\nmissing facades. Although it logs, because nothing has been coded to the\nfacades we can safely add them without any consequence."},"diff":{"kind":"string","value":"@@ -29,13 +29,17 @@ client_facades = {\n'Backups': {'versions': [1, 2]},\n'Block': {'versions': [2]},\n'Bundle': {'versions': [1, 2, 3]},\n+ 'CharmHub': {'versions': [1]},\n'CharmRevisionUpdater': {'versions': [2]},\n'Charms': {'versions': [2]},\n'Cleaner': {'versions': [2]},\n'Client': {'versions': [1, 2]},\n'Cloud': {'versions': [1, 2, 3, 4, 5]},\n'CAASAdmission': {'versions': [1]},\n+ 'CAASApplication': {'versions': [1]},\n+ 'CAASApplicationProvisioner': {'versions': [1]},\n'CAASFirewaller': {'versions': [1]},\n+ 'CAASFirewallerEmbedded': {'versions': [1]},\n'CAASOperator': {'versions': [1]},\n'CAASAgent': {'versions': [1]},\n'CAASOperatorProvisioner': {'versions': [1]},\n"}}},{"rowIdx":1155,"cells":{"message":{"kind":"string","value":"Only configure flint2 once\nIf we've run configure before and a Makefile exists, let make figure out whether a recompile is necessary of flint2"},"diff":{"kind":"string","value":"@@ -8,7 +8,9 @@ pip install -r requirements.txt\n# Check for git clone of flint2 on MacOS and install if found\nif [ -f flint2/configure ]; then\ncd flint2/\n+ if [ ! 
-f Makefile ]; then\n./configure\n+ fi\nmake -j4\nmake install\ncd ../\n"}}},{"rowIdx":1156,"cells":{"message":{"kind":"string","value":"polys: avoid unnecessary using numbered_symbols() in primitive_element()\nAlso drop redundant polys option"},"diff":{"kind":"string","value":"@@ -674,14 +674,14 @@ def primitive_element(extension, **args):\nx = Dummy('x')\ndomain = args.get('domain', QQ)\n- F, Y = zip(*[(minimal_polynomial(e, domain=domain).replace(y), y)\n- for e, y in zip(extension, numbered_symbols('y', cls=Dummy))])\n+ F = [minimal_polynomial(e, domain=domain) for e in extension]\n+ Y = [p.gen for p in F]\nfor u in range(1, (len(F) - 1)*prod(f.degree() for f in F) + 1):\ncoeffs = [u**n for n in range(len(Y))]\nf = x - sum(c*y for c, y in zip(coeffs, Y))\n- *H, g = groebner(F + (f,), Y + (x,), domain=domain, polys=True)\n+ *H, g = groebner(F + [f], Y + [x], domain=domain)\nfor i, (h, y) in enumerate(zip(H, Y)):\nH[i] = (y - h).eject(*Y).retract(field=True)\n"}}},{"rowIdx":1157,"cells":{"message":{"kind":"string","value":"Fix LTE _init_\nHG--\nbranch : feature/microservices"},"diff":{"kind":"string","value":"@@ -15,6 +15,8 @@ from noc.core.profile.base import BaseProfile\nclass Profile(BaseProfile):\nname = \"Eltex.LTE\"\npattern_username = r\"(? 5 and (filename[-5:] == \".html\" or filename[-5:] == \".docx\" or filename[-5:] == \".epub\"):\n+ '''if len(filename) > 5 and (filename[-5:] == \".html\" or filename[-5:] == \".docx\" or filename[-5:] == \".epub\"):\nfilename_txt = filename[0:len(filename) - 5] + \".txt\"\nif len(filename) > 4 and (filename[-4:] == \".htm\" or filename[-4:] == \".odt\" or filename[-4] == \".txt\"):\n- filename_txt = filename[0:len(filename) - 4] + \".txt\"\n+ filename_txt = filename[0:len(filename) - 4] + \".txt\" '''\n# Below is for transcribed files and for user created text files within QualCoder\nif self.source[row]['mediapath'] is None and filename_txt is None:\nfilename_txt = filename + \".txt\"\n"}}},{"rowIdx":1170,"cells":{"message":{"kind":"string","value":"Classes for extensions\nAdded for both extensions and lnfaucet db"},"diff":{"kind":"string","value":"@@ -30,3 +30,62 @@ class Database:\n\"\"\"Given a query, cursor.execute() it.\"\"\"\nself.cursor.execute(query, values)\nself.connection.commit()\n+\n+\n+class ExtDatabase:\n+ def __init__(self, db_path: str = os.path.join(LNBITS_PATH, \"extensions\", \"overview.sqlite3\")):\n+ self.path = db_path\n+ self.connection = sqlite3.connect(db_path)\n+ self.connection.row_factory = sqlite3.Row\n+ self.cursor = self.connection.cursor()\n+\n+ def __enter__(self):\n+ return self\n+\n+ def __exit__(self, exc_type, exc_val, exc_tb):\n+ self.cursor.close()\n+ self.connection.close()\n+\n+ def fetchall(self, query: str, values: tuple) -> list:\n+ \"\"\"Given a query, return cursor.fetchall() rows.\"\"\"\n+ self.cursor.execute(query, values)\n+ return self.cursor.fetchall()\n+\n+ def fetchone(self, query: str, values: tuple):\n+ self.cursor.execute(query, values)\n+ return self.cursor.fetchone()\n+\n+ def execute(self, query: str, values: tuple) -> None:\n+ \"\"\"Given a query, cursor.execute() it.\"\"\"\n+ self.cursor.execute(query, values)\n+ self.connection.commit()\n+\n+\n+class FauDatabase:\n+ def __init__(self, db_path: str = os.path.join(LNBITS_PATH, \"extensions\", \"lnurlfaucet\", \"database.sqlite3\")):\n+ self.path = db_path\n+ self.connection = sqlite3.connect(db_path)\n+ self.connection.row_factory = sqlite3.Row\n+ self.cursor = self.connection.cursor()\n+\n+ def __enter__(self):\n+ return self\n+\n+ 
def __exit__(self, exc_type, exc_val, exc_tb):\n+ self.cursor.close()\n+ self.connection.close()\n+\n+ def fetchall(self, query: str, values: tuple) -> list:\n+ \"\"\"Given a query, return cursor.fetchall() rows.\"\"\"\n+ self.cursor.execute(query, values)\n+ return self.cursor.fetchall()\n+\n+ def fetchone(self, query: str, values: tuple):\n+ self.cursor.execute(query, values)\n+ return self.cursor.fetchone()\n+\n+ def execute(self, query: str, values: tuple) -> None:\n+ \"\"\"Given a query, cursor.execute() it.\"\"\"\n+ self.cursor.execute(query, values)\n+ self.connection.commit()\n+\n"}}},{"rowIdx":1171,"cells":{"message":{"kind":"string","value":"[bugfix] Fix _formatLimit_MonthOfYear\nLimit is given as 1900 but not recognized by predicate"},"diff":{"kind":"string","value":"@@ -2152,7 +2152,7 @@ formatLimits = {\n}\n# All month of year articles are in the same format\n-_formatLimit_MonthOfYear = (lambda v: 1 <= 1900 and v < 2051, 1900, 2051)\n+_formatLimit_MonthOfYear = (lambda v: 1900 <= v < 2051, 1900, 2051)\nfor month in yrMnthFmts:\nformatLimits[month] = _formatLimit_MonthOfYear\n"}}},{"rowIdx":1172,"cells":{"message":{"kind":"string","value":"test: Misc update in test_tutorial\nAdd missing remove_target call from \"Delegate to Hashed Bins\"\nsection\nAdd comments to dirty_roles output assertion"},"diff":{"kind":"string","value":"@@ -134,7 +134,8 @@ class TestTutorial(unittest.TestCase):\nrepository.root.load_signing_key(private_root_key)\nrepository.root.load_signing_key(private_root_key2)\n- # Patch logger to assert that it accurately logs dirty roles\n+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and\n+ # #958. We still call it here to see if roles are dirty as expected.\nwith mock.patch(\"tuf.repository_tool.logger\") as mock_logger:\nrepository.dirty_roles()\n# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')\n@@ -186,6 +187,8 @@ class TestTutorial(unittest.TestCase):\nrepository.timestamp.expiration = datetime.datetime(2080, 10, 28, 12, 8)\n+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and\n+ # #958. We still call it here to see if roles are dirty as expected.\nwith mock.patch(\"tuf.repository_tool.logger\") as mock_logger:\nrepository.dirty_roles()\n# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')\n@@ -265,7 +268,8 @@ class TestTutorial(unittest.TestCase):\n'timestamp_key', 'password')\nrepository.timestamp.load_signing_key(private_timestamp_key)\n- # Patch logger to assert that it accurately logs dirty roles\n+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and\n+ # #958. We still call it here to see if roles are dirty as expected.\nwith mock.patch(\"tuf.repository_tool.logger\") as mock_logger:\nrepository.dirty_roles()\n# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')\n@@ -278,6 +282,8 @@ class TestTutorial(unittest.TestCase):\nself.assertTrue(os.path.exists(os.path.join(\n'repository','targets', 'myproject', 'file4.txt')))\n+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and\n+ # #958. We still call it here to see if roles are dirty as expected.\nwith mock.patch(\"tuf.repository_tool.logger\") as mock_logger:\nrepository.dirty_roles()\n# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. 
u'')\n@@ -318,7 +324,8 @@ class TestTutorial(unittest.TestCase):\n'unclaimed_key', 'password')\nrepository.targets(\"unclaimed\").load_signing_key(private_unclaimed_key)\n-\n+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and\n+ # #958. We still call it here to see if roles are dirty as expected.\nwith mock.patch(\"tuf.repository_tool.logger\") as mock_logger:\nrepository.dirty_roles()\n# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')\n@@ -337,6 +344,7 @@ class TestTutorial(unittest.TestCase):\n# ----- Tutorial Section: Delegate to Hashed Bins\n+ repository.targets('unclaimed').remove_target(\"myproject/file4.txt\")\ntargets = repository.get_filepaths_in_directory(\nos.path.join('repository', 'targets', 'myproject'), recursive_walk=True)\n@@ -362,10 +370,11 @@ class TestTutorial(unittest.TestCase):\n])\n-\nfor delegation in repository.targets('unclaimed').delegations:\ndelegation.load_signing_key(private_unclaimed_key)\n+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and\n+ # #958. We still call it here to see if roles are dirty as expected.\nwith mock.patch(\"tuf.repository_tool.logger\") as mock_logger:\nrepository.dirty_roles()\n# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')\n"}}},{"rowIdx":1173,"cells":{"message":{"kind":"string","value":"Fix 'navigation_depth' functionality\nRead the Docs was not using the sphinx_rtd_theme settings due to\nclobbering the configuration dictionary, tweaked conf.py to avoid this."},"diff":{"kind":"string","value":"# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n+import importlib\nimport os\nimport warnings\n# import sys\n@@ -61,12 +62,9 @@ warnings.filterwarnings(\"ignore\", category=UserWarning,\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n-on_rtd = os.environ.get('READTHEDOCS') == 'True'\n-if on_rtd:\n- html_theme = 'default'\n-else:\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\n+html_style = None\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# Add any paths that contain custom static files (such as style sheets) here,\n"}}},{"rowIdx":1174,"cells":{"message":{"kind":"string","value":"Enable tuples and lists in handle_probability_param()\nhandle_probability_param() in parameters.py has so\nfar only supported single numbers, True, False and\nStochasticParameter. Now it also supports tuples\nof form (a, b), which are transformed to Uniform\nand lists of form [a, b, c, ...], which are\ntransformed to Choice. 
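(Editor's aside: a condensed, standalone sketch of the tuple/list handling this change adds. The `Uniform`/`Choice`/`Binomial` stubs are simplified stand-ins for the imgaug.parameters classes named in the diff that follows.)

```python
import random

# Stub parameter classes standing in for imgaug.parameters (assumed names
# taken from the diff below; the real implementations differ).
class Uniform:
    def __init__(self, a, b):
        self.a, self.b = a, b
    def draw(self):
        return random.uniform(self.a, self.b)

class Choice:
    def __init__(self, values):
        self.values = list(values)
    def draw(self):
        return random.choice(self.values)

class Binomial:
    def __init__(self, p):
        self.p = p
    def draw(self):
        p = self.p.draw() if hasattr(self.p, "draw") else self.p
        return 1 if random.random() < p else 0

def handle_probability_param(param):
    # (a, b) tuple -> probability drawn uniformly from [a, b]
    if isinstance(param, tuple) and len(param) == 2:
        assert all(0 <= v <= 1.0 for v in param)
        return Binomial(Uniform(*param))
    # [a, b, c, ...] list -> probability sampled from the given choices
    if isinstance(param, list):
        assert all(0 <= v <= 1.0 for v in param)
        return Binomial(Choice(param))
    return Binomial(param)

print(handle_probability_param((0.2, 0.8)).draw())
print(handle_probability_param([0.0, 0.5, 1.0]).draw())
```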
These are useful for masks."},"diff":{"kind":"string","value":"@@ -105,7 +105,7 @@ def handle_discrete_param(param, name, value_range=None, tuple_to_uniform=True,\nlist_str = \", list of %s\" % (allowed_type,) if list_to_choice else \"\"\nraise Exception(\"Expected %s, tuple of two %s%s or StochasticParameter for %s, got %s.\" % (allowed_type, allowed_type, list_str, name, type(param),))\n-def handle_probability_param(param, name):\n+def handle_probability_param(param, name, tuple_to_uniform=False, list_to_choice=False):\neps = 1e-6\nif param in [True, False, 0, 1]:\nreturn Deterministic(int(param))\n@@ -115,6 +115,20 @@ def handle_probability_param(param, name):\nreturn Deterministic(int(round(param)))\nelse:\nreturn Binomial(param)\n+ elif tuple_to_uniform and isinstance(param, tuple):\n+ ia.do_assert(all([\n+ ia.is_single_number(v) for v in param\n+ ]), \"Expected parameter '%s' of type tuple to only contain number, got %s.\" % (name, [type(v) for v in param],))\n+ ia.do_assert(len(param) == 2)\n+ ia.do_assert(0 <= param[0] <= 1.0)\n+ ia.do_assert(0 <= param[1] <= 1.0)\n+ return Binomial(Uniform(param[0], param[1]))\n+ elif list_to_choice and ia.is_iterable(param):\n+ ia.do_assert(all([\n+ ia.is_single_number(v) for v in param\n+ ]), \"Expected iterable parameter '%s' to only contain number, got %s.\" % (name, [type(v) for v in param],))\n+ ia.do_assert(all([0 <= p_i <= 1.0 for p_i in param]))\n+ return Binomial(Choice(param))\nelif isinstance(param, StochasticParameter):\nreturn param\nelse:\n"}}},{"rowIdx":1175,"cells":{"message":{"kind":"string","value":"GDB helpers: emit bind directives for BindingScope\nTN:"},"diff":{"kind":"string","value":"@@ -13,8 +13,8 @@ import funcy\nfrom langkit import names\nfrom langkit.compiled_types import (\nAbstractNodeData, Argument, ASTNode, BoolType, CompiledType,\n- LexicalEnvType, LongType, Symbol, T, Token, get_context,\n- render as ct_render, resolve_type, EnvRebindingsType\n+ EnvRebindingsType, LexicalEnvType, LongType, Symbol, T, Token,\n+ gdb_bind_var, get_context, render as ct_render, resolve_type\n)\nfrom langkit.diagnostics import (\nContext, DiagnosticError, Severity, check_multiple, check_source_language,\n@@ -1269,7 +1269,10 @@ class BindingScope(ResolvedExpression):\nself.static_type = self.expr.type\ndef _render_pre(self):\n- return self.expr._render_pre()\n+ return '\\n'.join(\n+ [gdb_bind_var(binding) for binding in self.expr_bindings]\n+ + [self.expr._render_pre()]\n+ )\ndef _render_expr(self):\nreturn self.expr._render_expr()\n"}}},{"rowIdx":1176,"cells":{"message":{"kind":"string","value":"Deletion: remove outer try/except block in reaper run_once\nrun_daemon already takes care of catching unhandled exceptions\nand re-trying the run_once function."},"diff":{"kind":"string","value":"@@ -463,7 +463,7 @@ def _run_once(rses, include_rses, exclude_rses, vos, chunk_size, greedy, scheme,\nif not rses_to_process:\nlogger(logging.ERROR, 'Reaper: No RSEs found. 
Will sleep for 30 seconds')\nreturn\n- try:\n+\ndict_rses = {}\n_, total_workers, logger = heartbeat_handler.live()\ntot_needed_free_space = 0\n@@ -601,11 +601,6 @@ def _run_once(rses, include_rses, exclude_rses, vos, chunk_size, greedy, scheme,\nif paused_rses:\nlogger(logging.INFO, 'Deletion paused for a while for following RSEs: %s', ', '.join(paused_rses))\n- except DatabaseException as error:\n- logger(logging.WARNING, 'Reaper: %s', str(error))\n- except Exception:\n- logger(logging.CRITICAL, 'Exception', exc_info=True)\n-\ndef stop(signum=None, frame=None):\n\"\"\"\n"}}},{"rowIdx":1177,"cells":{"message":{"kind":"string","value":"Fix Extreme.XOS.get_capabilities script\nHG--\nbranch : feature/microservices"},"diff":{"kind":"string","value":"@@ -17,7 +17,7 @@ from noc.lib.text import parse_table\nclass Script(BaseScript):\nname = \"Extreme.XOS.get_capabilities\"\n- rx_lldp = re.compile(r\"^\\s*\\d+\\s+Enabled\\s+Enabled\", re.MULTILINE)\n+ rx_lldp = re.compile(r\"^\\s*\\d+(\\:\\d+)?\\s+Enabled\\s+Enabled\", re.MULTILINE)\nrx_cdp = re.compile(r\"^\\s*CDP \\S+ enabled ports\\s+:\\s+\\d+\", re.MULTILINE)\n@false_on_cli_error\n"}}},{"rowIdx":1178,"cells":{"message":{"kind":"string","value":"$.Introspection: enhance documentation\nTN:"},"diff":{"kind":"string","value":"@@ -6,7 +6,7 @@ package ${ada_lib_name}.Introspection is\nInvalid_Field : exception;\n- ## In a lot of testcases, there is a single concrete AST node that has no\n+ ## In a lot of testcases, there is a single concrete node that has no\n## field. For these, generates a type that has no valid value.\ntype Field_Reference is\n% if ctx.sorted_parse_fields:\n@@ -16,25 +16,26 @@ package ${ada_lib_name}.Introspection is\nnew Integer range 1 .. 0\n% endif\n;\n- -- Enumeration of all AST node fields\n+ -- Enumeration of all node fields\nfunction Field_Name (Field : Field_Reference) return String;\n-- Return a lower-case name for Field\nfunction Index (Field : Field_Reference) return Positive;\n- -- Return the index in AST nodes to access the given Field\n+ -- Return the index in nodes to access the given ``Field``\nfunction Field_Reference_From_Index\n(Kind : ${root_node_kind_name}; Index : Positive) return Field_Reference;\n- -- Return the field reference corresponding to the given Index in AST nodes\n- -- of the given Kind. Raise an Invalid_Field if there is no field\n- -- corresponding to this index.\n+ -- Return the field reference corresponding to the given ``Index`` in nodes\n+ -- of the given ``Kind``. Raise an ``Invalid_Field`` exception if there is\n+ -- no field corresponding to this index.\ntype Field_Reference_Array is array (Positive range <>) of Field_Reference;\nfunction Fields\n(Kind : ${root_node_kind_name}) return Field_Reference_Array;\n- -- Return the list of fields that AST nodes of the given Kind have\n+ -- Return the list of fields that nodes of the given ``Kind`` have. This\n+ -- returns an empty array for list nodes.\nfunction Token_Node_Kind\n(Kind : ${root_node_kind_name}) return Token_Kind\n@@ -42,7 +43,7 @@ package ${ada_lib_name}.Introspection is\n-- Return the token kind corresponding to the given token node kind\n% if not ctx.generate_unparser:\n--\n- -- As unparser are not generated, this always raises a Program_Error\n+ -- As unparser are not generated, this always raises a ``Program_Error``\n-- exception.\n% endif\n"}}},{"rowIdx":1179,"cells":{"message":{"kind":"string","value":"Typo ?\nI removed \"-e\" option from \"pip install -e dist/*.whl # installs jaxlib (includes XLA)\" line 58. 
It is now coherent with lines 69-70.\nWhen I tried the command with the \"-e\" it threw an error, without \"-e\" it worked fine."},"diff":{"kind":"string","value":"@@ -55,7 +55,7 @@ You can install the necessary Python dependencies using ``pip``::\nTo build ``jaxlib`` with CUDA support, you can run::\npython build/build.py --enable_cuda\n- pip install -e dist/*.whl # installs jaxlib (includes XLA)\n+ pip install dist/*.whl # installs jaxlib (includes XLA)\nSee ``python build/build.py --help`` for configuration options, including ways to\n"}}},{"rowIdx":1180,"cells":{"message":{"kind":"string","value":"Fix arguments parsing in RandomGhosting\nFixes"},"diff":{"kind":"string","value":"@@ -50,22 +50,40 @@ class RandomGhosting(RandomTransform):\nif axis not in (0, 1, 2):\nraise ValueError(f'Axes must be in (0, 1, 2), not \"{axes}\"')\nself.axes = axes\n- if isinstance(num_ghosts, int):\n- self.num_ghosts_range = num_ghosts, num_ghosts\n- elif isinstance(num_ghosts, tuple) and len(num_ghosts) == 2:\n- self.num_ghosts_range = num_ghosts\n- self.intensity_range = self.parse_range(intensity, 'intensity')\n- for n in self.intensity_range:\n- if n < 0:\n- message = (\n- f'Intensity must be a positive number, not {n}')\n- raise ValueError(message)\n+ self.num_ghosts_range = self.parse_num_ghosts(num_ghosts)\n+ self.intensity_range = self.parse_intensity(intensity)\nif not 0 <= restore < 1:\nmessage = (\nf'Restore must be a number between 0 and 1, not {restore}')\nraise ValueError(message)\nself.restore = restore\n+ @staticmethod\n+ def parse_num_ghosts(num_ghosts):\n+ try:\n+ iter(num_ghosts)\n+ except TypeError:\n+ num_ghosts = num_ghosts, num_ghosts\n+ for n in num_ghosts:\n+ if not isinstance(n, int) or n < 0:\n+ message = (\n+ f'Number of ghosts must be a natural number, not {n}')\n+ raise ValueError(message)\n+ return num_ghosts\n+\n+ @staticmethod\n+ def parse_intensity(intensity):\n+ try:\n+ iter(intensity)\n+ except TypeError:\n+ intensity = intensity, intensity\n+ for n in intensity:\n+ if n < 0:\n+ message = (\n+ f'Intensity must be a positive number, not {n}')\n+ raise ValueError(message)\n+ return intensity\n+\ndef apply_transform(self, sample: Subject) -> dict:\nrandom_parameters_images_dict = {}\nfor image_name, image_dict in sample.get_images_dict().items():\n"}}},{"rowIdx":1181,"cells":{"message":{"kind":"string","value":"Fix ToTensor when PIL Image has mode F\nFixes\nThe only case of floating point supported by PIL seems to be `F`, so this should fix it."},"diff":{"kind":"string","value":"@@ -59,6 +59,8 @@ def to_tensor(pic):\nimg = torch.from_numpy(np.array(pic, np.int32, copy=False))\nelif pic.mode == 'I;16':\nimg = torch.from_numpy(np.array(pic, np.int16, copy=False))\n+ elif pic.mode == 'F':\n+ img = torch.from_numpy(np.array(pic, np.float32, copy=False))\nelse:\nimg = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))\n# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK\n"}}},{"rowIdx":1182,"cells":{"message":{"kind":"string","value":"Remove unused variable\nMy editor keeps moaning about it."},"diff":{"kind":"string","value":"@@ -164,7 +164,6 @@ def measure_by_ccg(request, format=None):\norg_ids = utils.param_to_list(request.query_params.get('org', []))\ntags = [x for x in request.query_params.get('tags', '').split(',') if x]\n- rolled = {}\nmeasure_values = MeasureValue.objects.by_ccg(org_ids, measure_id, tags)\nrsp_data = {\n"}}},{"rowIdx":1183,"cells":{"message":{"kind":"string","value":"Extend the incremental marker for parametrize\nThe incremental marker is 
adapted to handle properly test classes with parametrize defined at class level.\nFix"},"diff":{"kind":"string","value":"@@ -461,21 +461,49 @@ an ``incremental`` marker which is to be used on classes:\n# content of conftest.py\n- import pytest\n+ # store history of failures per test class name and per index in parametrize (if parametrize used)\n+ _test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {}\ndef pytest_runtest_makereport(item, call):\nif \"incremental\" in item.keywords:\n+ # incremental marker is used\nif call.excinfo is not None:\n- parent = item.parent\n- parent._previousfailed = item\n+ # the test has failed\n+ # retrieve the class name of the test\n+ cls_name = str(item.cls)\n+ # retrieve the index of the test (if parametrize is used in combination with incremental)\n+ parametrize_index = (\n+ tuple(item.callspec.indices.values())\n+ if hasattr(item, \"callspec\")\n+ else ()\n+ )\n+ # retrieve the name of the test function\n+ test_name = item.originalname or item.name\n+ # store in _test_failed_incremental the original name of the failed test\n+ _test_failed_incremental.setdefault(cls_name, {}).setdefault(\n+ parametrize_index, test_name\n+ )\ndef pytest_runtest_setup(item):\nif \"incremental\" in item.keywords:\n- previousfailed = getattr(item.parent, \"_previousfailed\", None)\n- if previousfailed is not None:\n- pytest.xfail(\"previous test failed ({})\".format(previousfailed.name))\n+ # retrieve the class name of the test\n+ cls_name = str(item.cls)\n+ # check if a previous test has failed for this class\n+ if cls_name in _test_failed_incremental:\n+ # retrieve the index of the test (if parametrize is used in combination with incremental)\n+ parametrize_index = (\n+ tuple(item.callspec.indices.values())\n+ if hasattr(item, \"callspec\")\n+ else ()\n+ )\n+ # retrieve the name of the first test function to fail for this class name and index\n+ test_name = _test_failed_incremental[cls_name].get(parametrize_index, None)\n+ # if name found, test has failed for the combination of class name & test name\n+ if test_name is not None:\n+ pytest.xfail(\"previous test failed ({})\".format(test_name))\n+\nThese two hook implementations work together to abort incremental-marked\ntests in a class. Here is a test module example:\n"}}},{"rowIdx":1184,"cells":{"message":{"kind":"string","value":"add Namespace.add_field\nThis patch adds the add_field method to namespace v2 to facilitate the creation\nof fields that have the same name as their argument."},"diff":{"kind":"string","value":"@@ -709,6 +709,24 @@ class Namespace:\nraise ValueError('Cannot define the jacobian {!r}: dimension is negative.'.format(jacobian))\nsetattr(self, jacobian, function.jacobian(geom, numpy.size(geom) - i))\n+ def add_field(self, __names: Union[str, Sequence[str]], *__bases, shape: Tuple[int, ...] 
= (), dtype: function.DType = float):\n+ '''Add field(s) of the form ns.u = function.dotarg('u', ...)\n+\n+ Parameters\n+ ----------\n+ names : :class:`str` or iterable thereof\n+ Name of both the generated field and the function argument.\n+ bases : :class:`nutils.function.Array` or something that can be :meth:`nutils.function.Array.cast` into one\n+ The arrays to take inner products with.\n+ shape : :class:`tuple` of :class:`int`, optional\n+ The shape to be appended to the argument.\n+ dtype : :class:`bool`, :class:`int`, :class:`float` or :class:`complex`\n+ The dtype of the argument.\n+ '''\n+\n+ for name in (__names,) if isinstance(__names, str) else __names:\n+ setattr(self, name, function.dotarg(name, *__bases, shape=shape, dtype=dtype))\n+\ndef copy_(self, **replacements: Mapping[str, function.Array]) -> 'Namespace':\n'''Return a copy of this namespace.\n"}}},{"rowIdx":1185,"cells":{"message":{"kind":"string","value":"Missing Sample\nUpdated to show missing sample text for a response."},"diff":{"kind":"string","value":"@@ -144,11 +144,12 @@ General Issues\n2. If an issue has a Jira ticket with a ``help-wanted`` label, there is a Help Wanted ticket in GitHub. It can be closed with the following note:\n.. code-block:: text\n+\nHi @username\nThanks for the report! We have created a [Help Wanted issue here](link to GitHub issue) and are looking for community's help. Would you be interested helping with a pull request?\n-3. If an issue has a Jira ticket without a ``help-wanted`` label and assigned to the current release fix version for a developer to fix, it can be closed with the following note\n+3. If an issue has a Jira ticket without a ``help-wanted`` label and assigned to the current release fix version for a developer to fix, it can be closed with the following note:\n.. code-block:: text\n@@ -171,7 +172,7 @@ General Issues\nFeature Requests\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n-Respond to the issue with the following note\n+Respond to the issue with the following note:\n.. code-block:: text\n"}}},{"rowIdx":1186,"cells":{"message":{"kind":"string","value":"Attempt to fix pipe_to unit test on Windows (for real this time)\nThe previous fix was apparently broken when I checked in with Linux line endings. 
This approach should be independent of that."},"diff":{"kind":"string","value":"@@ -406,13 +406,10 @@ def test_pipe_to_shell(base_app):\n# Windows\n# Get help menu and pipe it's output to the sort shell command\nout = run_cmd(base_app, 'help | sort')\n- expected = normalize(\"\"\"\n-\n-\n-_relative_load edit history pause pyscript run set shortcuts\n-========================================\n-cmdenvironment help load py quit save shell show\n-Documented commands (type help ):\"\"\")\n+ expected = ['', '', '_relative_load edit history pause pyscript run set shortcuts',\n+ '========================================',\n+ 'cmdenvironment help load py quit save shell show',\n+ 'Documented commands (type help ):']\nassert out == expected\nelse:\n# Mac and Linux\n"}}},{"rowIdx":1187,"cells":{"message":{"kind":"string","value":"Update Task API\nIncluding ended_at in datetime_fields\nRemoving unnecessary DateTimeFilters"},"diff":{"kind":"string","value":"@@ -27,8 +27,10 @@ class TaskSerializer(serializers.ModelSerializer):\n'created_at',\n'task_name',\n'database',\n+\n'rollback',\n'relevance',\n+ 'ended_at',\n)\ndef get_relevance(self, task):\n@@ -87,20 +89,6 @@ class TaskSerializer(serializers.ModelSerializer):\nreturn None\n-class EventFilter(filters.FilterSet):\n- class Meta:\n- model = TaskHistory\n- fields = {\n- 'updated_at': ('lte', 'gte')\n- }\n-\n- filter_overrides = {\n- django_models.DateTimeField: {\n- 'filter_class': django_filters.DateTimeFilter\n- },\n- }\n-\n-\nclass TaskAPI(viewsets.ReadOnlyModelViewSet):\n\"\"\"\n@@ -131,7 +119,6 @@ class TaskAPI(viewsets.ReadOnlyModelViewSet):\nserializer_class = TaskSerializer\npermission_classes = (permissions.IsAuthenticatedOrReadOnly,)\nfilter_backends = (filters.OrderingFilter,)\n- filter_class = EventFilter\nfilter_fields = (\n'task_id',\n'task_status',\n@@ -141,11 +128,13 @@ class TaskAPI(viewsets.ReadOnlyModelViewSet):\n'updated_at',\n'created_at',\n'user',\n- 'relevance'\n+ 'relevance',\n+ 'ended_at',\n+ 'database_name'\n)\nordering_fields = ('created_at', 'updated_at', 'id')\nordering = ('-created_at',)\n- datetime_fields = ('created_at', 'updated_at')\n+ datetime_fields = ('created_at', 'updated_at', 'ended_at')\ndef get_queryset(self):\nparams = self.request.GET.dict()\n"}}},{"rowIdx":1188,"cells":{"message":{"kind":"string","value":"Update CONTRIBUTING.md\nUpdate the contributing instructions to use python-poetry instead of sdispater as the repository namespace."},"diff":{"kind":"string","value":"@@ -87,7 +87,7 @@ You will need Poetry to start contributing on the Poetry codebase. 
Refer to the\nYou will first need to clone the repository using `git` and place yourself in its directory:\n```bash\n-$ git clone git@github.com:sdispater/poetry.git\n+$ git clone git@github.com:python-poetry/poetry.git\n$ cd poetry\n```\n"}}},{"rowIdx":1189,"cells":{"message":{"kind":"string","value":"Fix handling of ZFIT_DISABLE_TF_WARNING environment variable.\nThe logic in _maybe_disable_warnings() did not actually do what\nthe warning about the suppression of TensorFlow warnings claimed.\nSetting the environment variable had no effect.\nAlso slightly simplified the wording of the warning."},"diff":{"kind":"string","value":"\"\"\"Top-level package for zfit.\"\"\"\n# Copyright (c) 2021 zfit\n-import inspect\n-import sys\nimport warnings\nfrom pkg_resources import get_distribution\n@@ -32,15 +30,16 @@ __all__ = [\"z\", \"constraint\", \"pdf\", \"minimize\", \"loss\", \"core\", \"data\", \"func\",\ndef _maybe_disable_warnings():\nimport os\n- true = \"IS_TRUE\"\n- if not os.environ.get(\"ZFIT_DISABLE_TF_WARNINGS\", true):\n- return\n- elif true:\n- warnings.warn(\"All TensorFlow warnings are by default suppressed by zfit.\"\n- \" In order to not suppress them,\"\n- \" set the environment variable ZFIT_DISABLE_TF_WARNINGS to 0.\"\n+ disable_warnings = os.environ.get(\"ZFIT_DISABLE_TF_WARNINGS\")\n+ if disable_warnings is None:\n+ warnings.warn(\"TensorFlow warnings are by default suppressed by zfit.\"\n+ \" In order to show them,\"\n+ \" set the environment variable ZFIT_DISABLE_TF_WARNINGS=0.\"\n\" In order to suppress the TensorFlow warnings AND this warning,\"\n- \" set ZFIT_DISABLE_TF_WARNINGS manually to 1.\")\n+ \" set ZFIT_DISABLE_TF_WARNINGS=1.\")\n+ elif disable_warnings == '0':\n+ return\n+\nos.environ[\"KMP_AFFINITY\"] = \"noverbose\"\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n"}}},{"rowIdx":1190,"cells":{"message":{"kind":"string","value":"Add flag to disable reservation cleanup\nThis shouldn't be needed on our patched version of k8s that doesn't send\noffers to maint'd hosts. 
This adds a flag so I can disable it in the\ncron job that cleans up maint'd hosts."},"diff":{"kind":"string","value":"@@ -42,6 +42,10 @@ def parse_args():\n'-v', '--verbose', action='store_true',\ndest=\"verbose\", default=False,\n)\n+ parser.add_argument(\n+ '--disable-reservation-cleanup', action='store_true',\n+ dest=\"disable_reservation_cleanup\", default=False,\n+ )\nargs = parser.parse_args()\nreturn args\n@@ -100,6 +104,7 @@ def main():\ncleanup_forgotten_draining()\ncleanup_forgotten_down()\n+ if not args.disable_reservation_cleanup:\nunreserve_all_resources_on_non_draining_hosts()\nreserve_all_resources_on_draining_hosts()\n"}}},{"rowIdx":1191,"cells":{"message":{"kind":"string","value":"update googlebenchmark version\nupdates googlebenchmark version to match RMM/cuDF"},"diff":{"kind":"string","value":"@@ -4,7 +4,7 @@ include(ExternalProject)\nExternalProject_Add(GoogleBenchmark\nGIT_REPOSITORY https://github.com/google/benchmark.git\n- GIT_TAG main\n+ GIT_TAG v1.5.1\nSOURCE_DIR \"${GBENCH_ROOT}/googlebenchmark\"\nBINARY_DIR \"${GBENCH_ROOT}/build\"\nINSTALL_DIR \"${GBENCH_ROOT}/install\"\n"}}},{"rowIdx":1192,"cells":{"message":{"kind":"string","value":"[modules/spotify] enable scrolling\nthis change should enable scrolling for the spotify module\n(unfortunately, i am unable to fully test this, as i am not using\nspotify)\nfixes"},"diff":{"kind":"string","value":"@@ -110,7 +110,8 @@ class Module(core.module.Module):\ndef hidden(self):\nreturn self.string_song == \"\"\n- def __get_song(self):\n+ @core.decorators.scrollable\n+ def __get_song(self, widget):\nbus = self.__bus\nif self.__bus_name == \"spotifyd\":\nspotify = bus.get_object(\n@@ -128,11 +129,10 @@ class Module(core.module.Module):\nartist=\",\".join(props.get(\"xesam:artist\")),\ntrackNumber=str(props.get(\"xesam:trackNumber\")),\n)\n+ return self.__song\ndef update(self):\ntry:\n- self.__get_song()\n-\nif self.__bus_name == \"spotifyd\":\nbus = self.__bus.get_object(\n\"org.mpris.MediaPlayer2.spotifyd\", \"/org/mpris/MediaPlayer2\"\n@@ -156,7 +156,7 @@ class Module(core.module.Module):\nwidget.set(\"state\", \"paused\")\nelif widget.name == \"spotify.song\":\nwidget.set(\"state\", \"song\")\n- widget.full_text(self.__song)\n+ widget.full_text(self.__get_song(widget))\nexcept Exception as e:\nself.__song = \"\"\n"}}},{"rowIdx":1193,"cells":{"message":{"kind":"string","value":"Fix \"platform_adaptation\" documentation test on windows\nFor reasons I don't full understand, including \"windows.h\" seems to break everything. There's an alternative sleep function in stdlib.h so I've used that instead since it makes the point just as well."},"diff":{"kind":"string","value":"cdef extern from *:\n\"\"\"\n#if defined(_WIN32) || defined(MS_WINDOWS) || defined(_MSC_VER)\n- #define WIN32_LEAN_AND_MEAN\n- #include \n- #define myapp_sleep(m) Sleep(m)\n+ #include \"stdlib.h\"\n+ #define myapp_sleep(m) _sleep(m)\n#else\n#include \n#define myapp_sleep(m) ((void) usleep((m) * 1000))\n"}}},{"rowIdx":1194,"cells":{"message":{"kind":"string","value":"Fix typos\nFixed minor typos - Azaras to Azara's / Ruis' to Rui's"},"diff":{"kind":"string","value":"@@ -89,7 +89,7 @@ True\n## 4. 
Combine matched records\n-Implement the `create_record()` function that takes a `(treasure, coordinate)` pair from Azaras list and a `(location, coordinate, quadrant)` record from Ruis' list and returns `(treasure, coordinate, location, coordinate, quadrant)` **if the coordinates match**.\n+Implement the `create_record()` function that takes a `(treasure, coordinate)` pair from Azara's list and a `(location, coordinate, quadrant)` record from Rui's list and returns `(treasure, coordinate, location, coordinate, quadrant)` **if the coordinates match**.\nIf the coordinates _do not_ match, return the string **\"not a match\"**\nRe-format the coordinate as needed for accurate comparison.\n"}}},{"rowIdx":1195,"cells":{"message":{"kind":"string","value":"Allow nic-config conversion without Heat\nThe current script requires the orchestration (Heat)\nbe available. This change will allow the script to convert\nexisting templates provided without the orchestration\nservice present."},"diff":{"kind":"string","value":"@@ -82,6 +82,13 @@ def parse_opts(argv):\nparser.add_argument('template',\nmetavar='TEMPLATE_FILE',\nhelp='Existing NIC config template to convert.')\n+ parser.add_argument('--standalone',\n+ default=False,\n+ action='store_true',\n+ help='This switch allows the script to operate in '\n+ 'environments where the orchestration service '\n+ 'is not available. Such as environemnts with '\n+ 'ephemeral-heat')\nopts = parser.parse_args(argv[1:])\n@@ -225,7 +232,8 @@ class ConvertToAnsibleJ2(object):\nif isinstance(param, str):\nif param in self.param_to_var_map:\nreturn self.param_to_var_map[param]\n- elif param in self.stack_env.get('parameter_defaults', {}):\n+ elif (self.stack_env and\n+ param in self.stack_env.get('parameter_defaults', {})):\nstack_value = self.stack_env['parameter_defaults'][param]\nprint('INFO - Custom Parameter {} was hard-coded in the '\n'converted template using the value from the Heat stack '\n@@ -389,7 +397,7 @@ class ConvertToAnsibleJ2(object):\nnet_config_res_props = net_config_res['properties']\nif net_config_res['type'] == 'OS::Heat::Value':\n- h_net_conf = net_config_res_props['value']\n+ h_net_conf = net_config_res_props['value']['network_config']\nelif net_config_res['type'] == 'OS::Heat::SoftwareConfig':\nh_net_conf = net_config_res_props['config']['str_replace'][\n'params']['$network_config']['network_config']\n@@ -501,7 +509,10 @@ def main():\nj2_template = os.path.splitext(template)[0] + '.j2'\nvalidate_files(opts, template, networks_file, j2_template)\n+ if not opts.standalone:\nstack_env = get_stack_environment(opts.stack)\n+ else:\n+ stack_env = None\nconverter = ConvertToAnsibleJ2(stack_env, networks_file)\n"}}},{"rowIdx":1196,"cells":{"message":{"kind":"string","value":"Fix Sphinx crossrefs to 'Client'.\nBroken by move to 'spanner_v1' (the aliases in 'spanner' are not honored).\nCloses"},"diff":{"kind":"string","value":"@@ -42,23 +42,23 @@ Spanner Client\nInstantiating a Client\n----------------------\n-To use the API, the :class:`~google.cloud.spanner.client.Client`\n+To use the API, the :class:`~google.cloud.spanner_v1.client.Client`\nclass defines a high-level interface which handles authorization\nand creating other objects:\n.. 
code:: python\n- from google.cloud import spanner\n- client = spanner.Client()\n+ from google.cloud import spanner_v1\n+ client = spanner_v1.Client()\nLong-lived Defaults\n-------------------\n-When creating a :class:`~google.cloud.spanner.client.Client`, the\n+When creating a :class:`~google.cloud.spanner_v1.client.Client`, the\n``user_agent`` and ``timeout_seconds`` arguments have sensible\ndefaults\n-(:data:`~google.cloud.spanner.client.DEFAULT_USER_AGENT` and\n-:data:`~google.cloud.spanner.client.DEFAULT_TIMEOUT_SECONDS`).\n+(:data:`~google.cloud.spanner_v1.client.DEFAULT_USER_AGENT` and\n+:data:`~google.cloud.spanner_v1.client.DEFAULT_TIMEOUT_SECONDS`).\nHowever, you may over-ride them and these will be used throughout all API\nrequests made with the ``client`` you create.\n@@ -74,22 +74,22 @@ Configuration\nEngine or Google Compute Engine the project will be detected automatically.\n(Setting this environment variable is not required, you may instead pass the\n``project`` explicitly when constructing a\n- :class:`~google.cloud.spanner.client.Client`).\n+ :class:`~google.cloud.spanner_v1.client.Client`).\n- After configuring your environment, create a\n- :class:`~google.cloud.spanner.client.Client`\n+ :class:`~google.cloud.spanner_v1.client.Client`\n.. code::\n- >>> from google.cloud import spanner\n- >>> client = spanner.Client()\n+ >>> from google.cloud import spanner_v1\n+ >>> client = spanner_v1.Client()\nor pass in ``credentials`` and ``project`` explicitly\n.. code::\n- >>> from google.cloud import spanner\n- >>> client = spanner.Client(project='my-project', credentials=creds)\n+ >>> from google.cloud import spanner_v1\n+ >>> client = spanner_v1.Client(project='my-project', credentials=creds)\n.. tip::\n@@ -106,8 +106,8 @@ Warnings about Multiprocessing\nNext Step\n---------\n-After a :class:`~google.cloud.spanner.client.Client`, the next\n-highest-level object is an :class:`~google.cloud.spanner.instance.Instance`.\n+After a :class:`~google.cloud.spanner_v1.client.Client`, the next\n+highest-level object is an :class:`~google.cloud.spanner_v1.instance.Instance`.\nYou'll need one before you can interact with databases.\nNext, learn about the :doc:`instance-usage`.\n"}}},{"rowIdx":1197,"cells":{"message":{"kind":"string","value":"[TVMC] Keep quantized weights when importing PyTorch model\nBYOC requires `keep_quantized_weight` be set to true when converting\nPyTorch models using `from_torch`. Setting this to be True when using\nTVMC."},"diff":{"kind":"string","value":"@@ -262,7 +262,9 @@ class PyTorchFrontend(Frontend):\ninput_shapes = list(shape_dict.items())\nlogger.debug(\"parse Torch model and convert into Relay computation graph\")\n- return relay.frontend.from_pytorch(traced_model, input_shapes, **kwargs)\n+ return relay.frontend.from_pytorch(\n+ traced_model, input_shapes, keep_quantized_weight=True, **kwargs\n+ )\nclass PaddleFrontend(Frontend):\n"}}},{"rowIdx":1198,"cells":{"message":{"kind":"string","value":"Release: Make sure to check with pip locally before uploading to PyPI\n* This will avoid breakage like recently with runners wrongly handled\nby pip.\n* Only very basic test is done with pip installed Nuitka."},"diff":{"kind":"string","value":"from __future__ import print_function\nimport os\n+import sys\n+import shutil\nfrom nuitka.tools.release.Documentation import createReleaseDocumentation\nfrom nuitka.tools.release.Release import checkBranchName\n@@ -53,31 +55,31 @@ def main():\ncontents = open(\"README.rst\", \"rb\").read()\nassert b\".. 
contents\" not in contents\n+ shutil.rmtree(\"check_nuitka\", ignore_errors = True)\n+ shutil.rmtree(\"dist\", ignore_errors = True)\n+\nprint(\"Creating documentation.\")\ncreateReleaseDocumentation()\nprint(\"Creating source distribution.\")\nassert os.system(\"python setup.py sdist\") == 0\n+\n+ print(\"Creating virtualenv for quick test:\")\n+ assert os.system(\"virtualenv check_nuitka\") == 0\n+\n+ print(\"Installing Nuitka into virtualenv:\")\n+ print(\"*\" * 40)\n+ assert os.system(\"cd check_nuitka; . bin/activate; pip install ../dist/Nuitka*.tar.gz\") == 0\n+ print(\"*\" * 40)\n+\n+ print(\"Compiling basic test:\")\n+ print(\"*\" * 40)\n+ assert os.system(\"cd check_nuitka; . bin/activate; nuitka-run ../tests/basics/Asserts.py\") == 0\n+ print(\"*\" * 40)\n+\n+ if \"check\" not in sys.argv:\n+ assert False\nprint(\"Uploading source dist\")\nassert os.system(\"twine upload dist/*\") == 0\nprint(\"Uploaded.\")\n-\n- # TODO: This won't work yet.\n- # import time\n- # import xmlrpclib\n- # if False:\n- # for _i in range(60):\n- # # Wait some time for PyPI to catch up with us. Without delay\n- # # the old version will still appear. Since this is running\n- # # in a Buildbot, we need not be optimal.\n- # time.sleep(5*60)\n- #\n- # pypi = xmlrpclib.ServerProxy(\"https://pypi.python.org/pypi\")\n- # pypi_versions = pypi.package_releases(\"Nuitka\")\n- #\n- # assert len(pypi_versions) == 1, pypi_versions\n- # if nuitka_version == pypi_versions[0]:\n- # break\n- #\n- # print(\"Version check failed:\", nuitka_version, pypi_versions)\n- #\n- # print(\"Uploaded OK:\", pypi_versions[0])\n+ else:\n+ print(\"Checked OK, not uploaded.\")\n"}}},{"rowIdx":1199,"cells":{"message":{"kind":"string","value":"Making the start of stop string to mark hidden tests configurable\nTODO: Find out why nbgrader quickstart does not put them into the configuration file?"},"diff":{"kind":"string","value":"@@ -3,13 +3,39 @@ import re\nfrom .. import utils\nfrom . 
import NbGraderPreprocessor\n+from traitlets import Unicode\n+from textwrap import dedent\nclass RemoveHidden(NbGraderPreprocessor):\n+ hidestart = Unicode(\n+ '### HIDESTART',\n+ config=True,\n+ help=dedent(\n+ \"\"\"\n+ Suppose you want to hide some test cases from your students in a cell.\n+ Place this string before those test cases and the corresponding string\n+ hideend after them.\n+ \"\"\"\n+ )\n+ )\n+\n+ hideend = Unicode(\n+ '### HIDEEND',\n+ config=True,\n+ help=dedent(\n+ \"\"\"\n+ Suppose you want to hide some test cases from your students in a cell.\n+ Place this string after those tests.\n+ \"\"\"\n+ )\n+ )\ndef preprocess_cell(self, cell, resources, cell_index):\nif utils.is_grade(cell) or utils.is_solution(cell) or utils.is_locked(cell):\n- cell.source = re.sub('START(?:.|\\n)*?STOP', '', cell.source)\n+ cell.source = re.sub('{}(?:.|\\n)*?{}'.format(self.hidestart,\n+ self.hideend)\n+ , '', cell.source)\n# we probably don't really need this?\ncell.metadata.nbgrader['oldchecksum'] = cell.metadata.nbgrader['checksum']\n"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":11,"numItemsPerPage":100,"numTotalItems":13545,"offset":1100,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjM2MTQ5MCwic3ViIjoiL2RhdGFzZXRzL211ZWxsZXJ6ci9jb21taXQtY3JvbmljbGUtcHJvY2Vzc2VkLWRlLW5ld2xpbmVzIiwiZXhwIjoxNzU2MzY1MDkwLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.5URXr5xBYnfFbw5BPlM99Zin5wsidVIc4YcA_c9Qhhx5LuWiExOrKnMwNWseGG-blvyKVkK-2lqHkx8aQaErAw","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
Remove unneeded check and add comments
Removes the unneeded check for whether the cache is empty.
Also adds a second comment about the format of the contents
of the Redis cache.
@@ -30,6 +30,7 @@ AOC_WHITELIST = AOC_WHITELIST_RESTRICTED + (Channels.advent_of_code,)
class AdventOfCode(commands.Cog):
"""Advent of Code festivities! Ho Ho Ho!"""
+ # Redis Cache for linking Discord IDs to Advent of Code usernames
account_links = RedisCache()
def __init__(self, bot: Bot):
@@ -186,23 +187,9 @@ class AdventOfCode(commands.Cog):
"""
Link your Discord Account to your Advent of Code name.
- Stored in a Redis Cache, Discord ID: Advent of Code Name
+ Stored in a Redis Cache with the format of `Discord ID: Advent of Code Name`
"""
cache_items = await self.account_links.items()
-
- # A short circuit in case the cache is empty
- if len(cache_items) == 0 and aoc_name:
- log.info(f"{ctx.author} ({ctx.author.id}) is now linked to {aoc_name}.")
- await self.account_links.set(ctx.author.id, aoc_name)
- await ctx.reply(f"You have linked your Discord ID to {aoc_name}.")
- return
- elif len(cache_items) == 0:
- await ctx.reply(
- "You have not linked an Advent of Code account."
- "Please re-run the command with one specified."
- )
- return
-
cache_aoc_name = [value for _, value in cache_items]
if aoc_name:
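A minimal sketch of the simplified flow this commit leaves behind, with a plain dict standing in for the RedisCache; the name-taken rule shown here is an assumption, not the cog's exact behavior:

    links = {}  # stand-in for the RedisCache: Discord ID -> AoC name

    def link_account(author_id, aoc_name):
        # Without the short-circuit, empty and non-empty caches share one path.
        if aoc_name and aoc_name not in links.values():
            links[author_id] = aoc_name
            return "linked to %s" % aoc_name
        return "name taken" if aoc_name else "no account linked"

    print(link_account(123, "lemon"))  # linked to lemon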
(airline-demo-testability-2) Use file handle solids for sfo_weather_data
Summary: Use the new fancy things. No behavior change.
Test Plan: Run in dagit. Buildkite.
Reviewers: max, natekupp
Fixed js error in iframe_login
This is a bug in a previous PR. There's no javascript after it, so it
doesn't seem to stop the user from being able to log in, but still good to fix.
<script src="{% static 'blazy/blazy.js' %}"></script>
<script>
new Blazy({container: 'body'});
- var username = (new URLSearchParams(window.location.search)).get("username");
- if (username) {
+ var username = (new URLSearchParams(window.location.search)).get("username"),
+ element = document.getElementById('id_auth-username');
+ if (username && element) {
if (username.endsWith("commcarehq.org")) {
username = username.split("@")[0];
}
- document.getElementById('id_auth-username').value = username;
+ element.value = username;
}
</script>
</body>
fix wrong link
the previous link cannot be opened; the correct link is likely the one below
Add more details to the internal error for "worker cannot find registered function"
This adds some more debug information for this internal error that shouldn't happen.
import dis
import hashlib
+import os
import importlib
import inspect
import json
@@ -405,7 +406,10 @@ class FunctionActorManager:
warning_message = (
"This worker was asked to execute a "
"function that it does not have "
- "registered. You may have to restart "
+ f"registered ({function_descriptor}, "
+ f"node={self._worker.node_ip_address}, "
+ f"worker_id={self._worker.worker_id.hex()}, "
+ f"pid={os.getpid()}). You may have to restart "
"Ray."
)
if not warning_sent:
Allow user-defined kwargs passed to click.group
Fixes
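The diff for this record is not shown here; a minimal sketch of the pattern the title describes, on a hypothetical decorator factory: widen the signature with **kwargs so user-defined options pass through instead of being rejected.

    from typing import Any, Callable

    def group(name: str = None, **kwargs: Any) -> Callable:
        # Unknown keyword arguments are simply forwarded, not rejected.
        def decorator(f: Callable) -> Callable:
            f.group_options = dict(kwargs, name=name)
            return f
        return decorator

    @group(name="cli", chain=True)   # chain= is a user-defined kwarg here
    def cli():
        pass

    print(cli.group_options)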
Fix typo
Fix "contorls" to "controls" in window_text docstring
@@ -307,7 +307,7 @@ class BaseWrapper(object):
"""
Window text of the element
- Quite a few contorls have other text that is visible, for example
+ Quite a few controls have other text that is visible, for example
Edit controls usually have an empty string for window_text but still
have text displayed in the edit window.
"""
Minor relocate of badge
no more info needed

Cool Instagram scripts for promotion and API wrapper. Written in Python.
___
+[](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg)
+
As you may know, Instagram closed its API in summer 2016. This Python module can do the same things without any effort. Also it has lots of [example scripts](https://github.com/ohld/instabot/tree/master/examples) to start with.
If you have any ideas, please, leave them in [Issues section](https://github.com/ohld/instabot/issues) or in our [Telegram chat](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg).
-[](https://t.me/joinchat/AAAAAEHxHAtKhKo4X4r7xg)
-
-*Your __contribution__ and support through __Stars__ will be highly appreciated.*
+*Your __contribution__ and support through __stars__ will be highly appreciated.*
## How to install and update
{Compute} Doc fix for 'vm user delete'
Clarify that 'vm user delete' also removes the home directory on Linux systems.
@@ -1950,6 +1950,8 @@ short-summary: Manage user accounts for a VM.
helps['vm user delete'] = """
type: command
short-summary: Delete a user account from a VM.
+long-summary: >
+ Also deletes the user home directory on Linux VMs.
examples:
- name: Delete a user account.
text: az vm user delete -u username -n MyVm -g MyResourceGroup
Adds the job user and team
This is required as some challenge admins want to see the teams for
each job in order to download the supplementary file.
{% extends "site.html" %}
{% load evaluation_extras %}
+{% load user_profile_link from profiles %}
{% load guardian_tags %}
{% load url from grandchallenge_tags %}
<thead>
<tr>
<th>ID</th>
+ {% if "change_challenge" in challenge_perms %}
+ <th>User</th>
+ {% endif %}
<th>Created</th>
<th>Updated</th>
<th>Status</th>
{% for job in object_list %}
<tr>
<td>{{ job.id }}</td>
+ {% if "change_challenge" in challenge_perms %}
+ <td>
+ {{ job.submission.creator|user_profile_link }}
+
+ {% if site.evaluation_config.use_teams %}
+ {% with job.result|get_team_html as team_html %}
+ {% if team_html %}
+ ({{ team_html }})
+ {% endif %}
+ {% endwith %}
+ {% endif %}
+ </td>
+ {% endif %}
<td data-order="{{ job.created|date:"U" }}">{{ job.created }}</td>
<td data-order="{{ job.modified|date:"U" }}">{{ job.modified }}</td>
<td>
add stdout as an output format for report subcommand
Using --format=stdout now writes output to STDOUT in human-readable
form, in addition to tabular/Excel/etc.
@@ -41,6 +41,7 @@ THE SOFTWARE.
"""
import os
+import sys
import numpy as np
import pandas as pd
@@ -123,6 +124,13 @@ def write_styled_html(path, df, index=None):
ofh.write(html)
+# Write a dataframe to STDOUT
+def write_to_stdout(stem, df, index=None, line_width=None):
+ """Write dataframe in tab-separated form to STDOUT."""
+ sys.stdout.write("TABLE: %s\n" % stem)
+ sys.stdout.write(df.to_string(index=index, line_width=line_width) + '\n\n')
+
+
# Write a table returned from the pyani database in the requested format
def write_dbtable(data, headers, path=None, formats=('tab',), index=False):
"""Write database result table to output file in named format."""
@@ -131,7 +139,9 @@ def write_dbtable(data, headers, path=None, formats=('tab',), index=False):
formatdict = {'tab': (df.to_csv, {'sep': '\t', 'index': False}, '.tab'),
'excel': (df.to_excel, {'index': False}, '.xlsx'),
'html': (write_styled_html, {'df': df, 'index': index},
- '.html')}
+ '.html'),
+ 'stdout': (write_to_stdout, {'df': df, 'index': False}, '')
+ }
for format in formats:
func, args, ext = formatdict[format]
ofname = path + ext
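A quick usage sketch of the writer added above (same function body as in the diff), showing the human-readable output the 'stdout' format produces; the sample dataframe is illustrative only.

    import sys
    import pandas as pd

    def write_to_stdout(stem, df, index=False, line_width=None):
        """Write dataframe in tab-separated form to STDOUT."""
        sys.stdout.write("TABLE: %s\n" % stem)
        sys.stdout.write(df.to_string(index=index, line_width=line_width) + '\n\n')

    write_to_stdout("identity", pd.DataFrame({"genome": ["A", "B"], "identity": [1.0, 0.98]}))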
chore: correct region tag in submit_job_to_cluster.py
Change region tag to make it unique. The previous tag was used in another create cluster file and caused problems with automation tools.
wallet.get_request_by_addr: make deterministic
This makes test_invoices/test_wallet_get_request_by_addr pass without flakiness.
closes
@@ -2355,8 +2355,13 @@ class Abstract_Wallet(ABC, Logger, EventListener):
if not req.is_lightning() or self.lnworker.get_invoice_status(req) == PR_UNPAID]
if not reqs:
return None
- # note: there typically should not be more than one relevant request for an address
- return reqs[0]
+ # note: There typically should not be more than one relevant request for an address.
+ # If there's multiple, return the one created last (see #8113). Consider:
+ # - there is an old expired req1, and a newer unpaid req2, reusing the same addr (and same amount),
+ # - now req2 gets paid. however, get_invoice_status will say both req1 and req2 are PAID. (see #8061)
+ # - as a workaround, we return the request with the larger creation time.
+ reqs.sort(key=lambda req: req.get_time())
+ return reqs[-1]
def get_request(self, request_id: str) -> Optional[Invoice]:
return self._receive_requests.get(request_id)
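The tie-breaking rule above in miniature, using a hypothetical record type in place of the wallet's Invoice objects:

    from collections import namedtuple

    Req = namedtuple("Req", ["name", "time"])

    def newest(reqs):
        # Several requests can reuse one address; prefer the one created last.
        return max(reqs, key=lambda r: r.time) if reqs else None

    print(newest([Req("expired", 1), Req("unpaid", 5)]))  # Req(name='unpaid', time=5)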
@@ -19,7 +19,7 @@ fi
echo "Starting devserver in new tmux session..."
tmux new-session -d -s $session
tmux new-window -t "$session:1" -n gae "dev_appserver.py --admin_host=0.0.0.0 --host=0.0.0.0 --datastore_path=/datastore/tba.db src/default.yaml src/web.yaml src/api.yaml src/dispatch.yaml 2>&1 | tee /var/log/tba.log; read"
-tmux new-window -t "$session:2" -n gulp "gulp 2>&1 tee /var/log/gulp.log; read"
+tmux new-window -t "$session:2" -n gulp "gulp 2>&1 | tee /var/log/gulp.log; read"
if [ ! -z "$instance_name" ]; then
echo "Starting Cloud SQL proxy to connect to $instance_name"
tmux new-window -t "$session:3" -n sql "/cloud_sql_proxy -instances=$instance_name=tcp:3306 -credential_file=$auth_path | tee /var/log/sql.log; read"
Changes default "onBadFit" option to *nothing* (not even Robust+).
This update to the default behavior of do_long_sequence_gst when
a model doesn't fit the data is more conservative -- only do the
special Robust+ or wildcard post-processing analysis when the user
specifically requests it.
@@ -1329,7 +1329,7 @@ def _post_opt_processing(callerName, ds, target_model, mdl_start, lsgstLists,
objective = advancedOptions.get('objective', 'logl')
badFitThreshold = advancedOptions.get('badFitThreshold',DEFAULT_BAD_FIT_THRESHOLD)
if ret.estimates[estlbl].misfit_sigma(evaltree_cache=evaltree_cache, comm=comm) > badFitThreshold:
- onBadFit = advancedOptions.get('onBadFit',["wildcard"]) #["Robust+"]) # empty list => 'do nothing'
+ onBadFit = advancedOptions.get('onBadFit',[]) #["wildcard"]) #["Robust+"]) # empty list => 'do nothing'
if len(onBadFit) > 0 and parameters.get('weights',None) is None:
Python API: override __nonzero__ for node wrappers
TN:
@@ -790,6 +790,16 @@ class ${root_astnode_name}(object):
ctypes.byref(result))
return ${root_astnode_name}._wrap(result)
+ def __nonzero__(self):
+ """
+ Return always True so that checking a node against None can be done as
+ simply as::
+
+ if node:
+ ...
+ """
+ return True
+
def __len__(self):
"""Return the number of ${root_astnode_name} children this node has."""
node = self._unwrap(self)
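Why the override matters: a wrapper that defines __len__ is otherwise falsy when it has zero children, so "if node:" would wrongly behave like a length check. A stand-in sketch (Python 2 spelling as in the diff, plus the Python 3 alias):

    class Node(object):
        def __len__(self):
            return 0                 # a leaf node has no children

        def __nonzero__(self):       # Python 2 truthiness hook
            return True
        __bool__ = __nonzero__       # Python 3 equivalent

    node = Node()
    print(bool(node), len(node))     # True 0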
container-common: Enable docker on boot for ubuntu
docker daemon is automatically started during package installation
but the service isn't enabled on boot.
tags:
with_pkg
- - name: start docker service
- service:
- name: docker
- state: started
- enabled: yes
- tags:
- with_pkg
-
- name: red hat 8 based systems tasks
when:
- ansible_distribution_major_version == '8'
tags:
with_pkg
+- name: start docker service
+ service:
+ name: docker
+ state: started
+ enabled: yes
+ tags:
+ with_pkg
+ when: not (ansible_os_family == 'RedHat' and
+ ansible_distribution_major_version == '8')
+
- name: ensure tmpfiles.d is present
lineinfile:
path: /etc/tmpfiles.d/ceph-common.conf
Improve sentence parsing
I've always parsed this sentence as "attrs comes with serious, business aliases". I just realized you probably meant srs bzns aliases and figured I'd clarify.
@@ -48,7 +48,7 @@ By default, all features are added, so you immediately have a fully functional d
As shown, the generated ``__init__`` method allows for both positional and keyword arguments.
-If playful naming turns you off, ``attrs`` comes with serious business aliases:
+If playful naming turns you off, ``attrs`` comes with serious-business aliases:
.. doctest::
@@ -117,14 +117,14 @@ goto FINAL
:DETECT_INKSCAPE_LOCATION
echo Trying to find Inkscape in Windows Registry...
+rem Checking NSIS-Installer registry information
rem Inkscape installation path is usually found in the registry
-rem "SOFTWARE\Inkscape\Inkscape"
-rem under HKLM (Local Machine -> machine wide installation) or
-rem HKCU (Current User -> user installation)
+rem "SOFTWARE\Inkscape\Inkscape" under HKLM (Local Machine ->
+rem machine wide installation) or rem HKCU (Current User ->
+rem user installation) if installed via NSIS exe installer.
rem We also have to keep in mind that the values might be in the 32bit or 64bit
rem version of the registry (i.e., under SOFTWARE\WOW6432Node\Inkscape\Inkscape
rem or SOFTWARE\Inkscape\Inkscape)
-rem This holds if Inkscape has been installed via via NSIS, not via MSI
for %%R in (HKLM HKCU) do (
for %%T in (32 64) do (
rem Output of REG QUERY "KeyName" /ve is (first line is a blank line):
@@ -136,7 +136,7 @@ for %%R in (HKLM HKCU) do (
rem so we skip the first two lines (skip=2) and then we take the second token
rem and the reamining output (tokens=2*), so %%A is REG_SZ and %%B is the path
rem even if it contains spaces (tokens are delimited by spaces)
- echo Trying registry root %%R [%%T]...
+ echo Trying SOFTWARE\Inkscape\Inkscape in registry root %%R [%%T]...
for /f "usebackq skip=2 tokens=2*" %%A in (`REG QUERY "%%R\SOFTWARE\Inkscape\Inkscape" /ve /reg:%%T 2^>nul`) do (
if exist %%B (
set INKSCAPE_DIR=%%B
@@ -157,6 +157,33 @@ for %%R in (HKLM HKCU) do (
)
)
+
+rem Checking MSI-Installer registry information
+rem Inkscape installation path is usually found in the registry
+rem under key "Path" in
+rem SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe
+rem if installed via msi installer
+for %%T in (32 64) do (
+ echo Trying SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe in registry root HKLM [%%T]...
+ for /f "usebackq skip=2 tokens=2*" %%A in (`REG QUERY "HKLM\SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\inkscape.exe" /v Path /reg:%%T 2^>nul`) do (
+ if exist %%B (
+ set INKSCAPE_DIR=%%B
+ )
+ )
+ if defined INKSCAPE_DIR (
+ echo Inkscape considered to be installed in !INKSCAPE_DIR!
+ echo Setting executable path to !INKSCAPE_DIR!
+ if exist "!INKSCAPE_DIR!\!INKSCAPE_EXENAME!" (
+ echo !INKSCAPE_DIR!\!INKSCAPE_EXENAME! found
+ echo.
+ goto INKSCAPE_FOUND
+ ) else (
+ echo !INKSCAPE_DIR!\!INKSCAPE_EXENAME! not found
+ )
+ )
+)
+
+
rem If we did non succeed in the registry lets have a look
rem at the most common install locations
echo Trying the usual Windows install locations...
Quick syntax correction for clarity
`''.join(srcCode)` is not really readable. On the other hand, `str.join('', srcCode)` is much better.
@@ -31,7 +31,7 @@ for file in allFiles:
srcCode = f.readlines()
# The last three lines are always the main() call
srcCode = srcCode[:-3]
- srcCode = ''.join(srcCode)
+ srcCode = str.join('', srcCode)
module.__SRC_CODE = srcCode
bmpSrcCode = highlight(srcCode, PythonLexer(), BmpImageFormatter())
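The two spellings are equivalent; the unbound-method form just reads left to right. A quick check:

    src_code = ["import os\n", "print(os.name)\n"]
    assert "".join(src_code) == str.join("", src_code)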
Update language ID map
html: add text.html.ngx for angular files
shaderlab: these are supposedly Unity Shaderlab files
r: the R language server can also handle R-flavoured markdown files
xsl and xml: decouple them
In general, added repo links to thirdparty syntaxes
boot: Remove special case for pypy load failures
There was a special case for Pypy in the handling of errors when loading
components. It looks like in the years since it was written, it may
have become unnecessary. Removing it leads to more helpful error
messages, so... let's remove it?
@@ -104,9 +104,7 @@ def _do_import (name):
message = str(sys.exc_info()[1].args[0])
s = message.rsplit(" ", 1)
- # Sadly, PyPy isn't consistent with CPython here.
- #TODO: Check on this behavior in pypy 2.0.
- if s[0] == "No module named" and (name.endswith(s[1]) or __pypy__):
+ if s[0] == "No module named" and (name.endswith(s[1])):
# It was the one we tried to import itself. (Case 1)
# If we have other names to try, try them!
return do_import2(base_name, names_to_try)
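The check that remains, in a self-contained sketch: parse the ImportError text and decide whether the failing module is the one we tried to import ourselves (case 1) rather than a missing dependency inside it. The exact message wording is the Python 2-style text the code targets.

    def failed_on_self(message, name):
        head, _, missing = message.rpartition(" ")
        return head == "No module named" and name.endswith(missing)

    print(failed_on_self("No module named nothere", "plugins.nothere"))   # True
    print(failed_on_self("No module named requests", "plugins.nothere"))  # False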
fix bug with --runtest where software or system packages were not showing due to a directory error.
The if conditions in eb_menu were not set up properly.
@@ -32,7 +32,7 @@ import subprocess
import time
import glob
-from buildtest.tools.config import BUILDTEST_ROOT
+from buildtest.tools.config import BUILDTEST_ROOT, config_opts
from buildtest.tools.menu import buildtest_menu
def systempkg_menu(systempkg):
@@ -199,6 +199,8 @@ def eb_menu(ebpkg):
app_tc_set = set()
+
+
# translate directory path into app name/version and toolchain name/version
for item in testroot_set:
# directory format $BUILDTEST_TESTDIR/ebapps/software/version, ebapp only 2 directories up
@@ -224,8 +226,7 @@ def eb_menu(ebpkg):
app_tc_set.add(app_ver+","+tcname_tcver)
# directory format $BUILDTEST_TESTDIR/ebapps/software/version/package, ebapp only 3 directories up
-
- if os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) == "ebapp":
+ elif os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item)))) == "ebapp":
app = os.path.basename(os.path.dirname(os.path.dirname(item)))
ver = os.path.basename(os.path.dirname(item))
@@ -235,7 +236,7 @@ def eb_menu(ebpkg):
# directory format $BUILDTEST_TESTDIR/ebapps/software/version/toolchainname/toolchainver/package, ebapp only 5 directories up
- if os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))) == "ebapp":
+ elif os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))) == "ebapp":
app = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(item)))))
ver = os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(item))))
@@ -438,11 +439,8 @@ def runtest_menu():
os.system("clear")
- cwd = BUILDTEST_ROOT
- testing = os.path.join(cwd,"testing")
- systempkg = os.path.join(testing,"system")
- ebpkg = os.path.join(testing,"ebapp")
-
+ system_testdir = os.path.join(config_opts['BUILDTEST_TESTDIR'],"system")
+ software_testdir = os.path.join(config_opts['BUILDTEST_TESTDIR'],"ebapp")
text = """
_________________________________________________________________________
|\ /|
@@ -491,8 +489,8 @@ def runtest_menu():
# force userinput to be integer in case its float or something else
userinput = int(userinput)
if userinput == 1:
- systempkg_menu(systempkg)
+ systempkg_menu(system_testdir)
elif userinput == 2:
- eb_menu(ebpkg)
+ eb_menu(software_testdir)
else:
print "Invalid Entry, please try again"
Fixes an "*" import in the middle of the code.
Importing everything without namespace is a bad practice.
Doing it outside module level is currently forbidden.
Python 3.9.1 refuses to compile it.
Flake8 reports: F406 'from kicost.kicost_gui import *' only allowed
at module level.
@@ -63,7 +63,7 @@ class kicost_kicadplugin(ActionPlugin):
bom_file = ''
try:
try:
- from kicost.kicost_gui import *
+ from kicost.kicost_gui import kicost_gui
kicost_gui(bom_file) # If KiCad and KiCost share the same Python installation.
except ImportError:
subprocess.call(('kicost', '--guide', bom_file), shell=True)
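Why the guarded import had to change: a star import below module level is rejected at compile time, so the old line could never even load on current Python. A self-contained check:

    bad = "def f():\n    from os.path import *\n"
    try:
        compile(bad, "<example>", "exec")
    except SyntaxError as exc:
        print("rejected:", exc.msg)   # import * only allowed at module level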
Update train.py
remove redundant code
@@ -98,7 +98,6 @@ def main(train_data_file, test_data_file, vocab_file, target_file, emb_file,
for pass_id in xrange(num_passes):
chunk_evaluator.reset(exe)
for data in train_reader():
- print len(data)
cost, batch_precision, batch_recall, batch_f1_score = exe.run(
fluid.default_main_program(),
feed=feeder.feed(data),
Split the empty cluster case from normal case
Cover normal usage of get_brokers function
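No diff accompanies this record; a purely hypothetical sketch of what splitting the empty-cluster case from the normal case can look like in tests (function body and names are stand-ins):

    def get_brokers(cluster):
        # hypothetical stand-in for the real function under test
        return list(cluster) if cluster else []

    def test_get_brokers_empty_cluster():
        assert get_brokers([]) == []

    def test_get_brokers_normal_case():
        assert get_brokers([1, 2]) == [1, 2]

    test_get_brokers_empty_cluster()
    test_get_brokers_normal_case()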
bootstrap_javascript use settings include_jquery
tnx
@@ -282,7 +282,7 @@ def bootstrap_jquery(jquery='full'):
@register.simple_tag
-def bootstrap_javascript(jquery='falsy'):
+def bootstrap_javascript(jquery=None):
"""
Return HTML for Bootstrap JavaScript.
@@ -315,7 +315,7 @@ def bootstrap_javascript(jquery='falsy'):
javascript_tags = []
# Set jquery value from setting or leave default.
- jquery = get_bootstrap_setting('include_jquery') or jquery
+ jquery = jquery or get_bootstrap_setting('include_jquery', 'falsy')
# Include jQuery if the option is passed
if jquery != 'falsy':
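The one-line fix reverses who wins the fallback; a sketch with a stub settings lookup makes the difference visible:

    SETTINGS = {"include_jquery": True}

    def get_bootstrap_setting(key, default=None):
        return SETTINGS.get(key, default)

    def old(jquery=None):
        return get_bootstrap_setting("include_jquery") or jquery  # setting shadows caller

    def new(jquery=None):
        return jquery or get_bootstrap_setting("include_jquery", "falsy")  # caller wins

    print(old(jquery="slim"), new(jquery="slim"))  # True slim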
tests/state_docs: clear registry before running the test.
Make sure docs examples get consistent naming
@@ -3,6 +3,18 @@ import pytest
import psyneulink as pnl
import doctest
+def clear_registry():
+ from psyneulink.components.component import DeferredInitRegistry
+ from psyneulink.components.system import SystemRegistry
+ from psyneulink.components.process import ProcessRegistry
+ from psyneulink.components.mechanisms.mechanism import MechanismRegistry
+ from psyneulink.components.projections.projection import ProjectionRegistry
+ # Clear Registry to have a stable reference for indexed suffixes of default names
+ pnl.clear_registry(DeferredInitRegistry)
+ pnl.clear_registry(SystemRegistry)
+ pnl.clear_registry(ProcessRegistry)
+ pnl.clear_registry(MechanismRegistry)
+ pnl.clear_registry(ProjectionRegistry)
def test_state_docs():
# get examples of mechanisms that can be used with GatingSignals/Mechanisms
@@ -10,7 +22,8 @@ def test_state_docs():
def test_parameter_state_docs():
- fail, total = doctest.testmod(pnl.components.states.parameterstate)
+ clear_registry()
+ fail, total = doctest.testmod(pnl.components.states.parameterstate, globs={})
if fail > 0:
pytest.fail("{} out of {} examples failed".format(fail, total),
Fix when filter working on POST
HG--
branch : feature/microservices
@@ -65,6 +65,8 @@ class ExtFormatMiddleware(object):
def process_request(self, request):
if request.GET and request.GET.get("__format") == "ext":
request.is_extjs = True
+ elif request.POST and request.POST.get("__format") == "ext":
+ request.is_extjs = True
else:
request.is_extjs = False
use addClassResourceCleanup in test_roles
Replaces resource_cleanup with addClassResourceCleanup in
test_roles.
test_utils.call_and_ignore_notfound_exc is NOT used in resource_setup
as delete_role_from_user_on_project and similar calls do not delete
the role; they just unassign the role from the target.
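A self-contained sketch of the idea behind addClassResourceCleanup: register a cleanup callable at creation time and run the stack LIFO later, instead of overriding resource_cleanup(). Names here are stand-ins, not Tempest's actual base class.

    class BaseTest(object):
        _cleanups = []

        @classmethod
        def add_class_resource_cleanup(cls, fn, *args):
            cls._cleanups.append((fn, args))

        @classmethod
        def run_class_cleanups(cls):
            while cls._cleanups:
                fn, args = cls._cleanups.pop()
                fn(*args)

    BaseTest.add_class_resource_cleanup(print, "delete role")
    BaseTest.add_class_resource_cleanup(print, "unassign role from target")
    BaseTest.run_class_cleanups()   # unassign first, then delete (LIFO)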
Add build status to README
Library can now successfully do nothing
# Manim - Mathematical Animation Engine
[](https://manim.readthedocs.io/en/latest/?badge=latest)
+[](https://travis-ci.org/3b1b/manim)
[](http://choosealicense.com/licenses/mit/)
Manim is an animation engine for explanatory math videos. It's used to create precise animations programmatically, as seen in the videos at [3Blue1Brown](https://www.3blue1brown.com/).
fixed duplication of representations
nuke.api.plugin.ExporterReview adds the representation explicitly via publish_on_farm, so skip adding the representation if it is already there. (ExtractBurnin has the same issue the other way around.)
ExporterReview should probably be refactored and publish_on_farm removed altogether.
@@ -601,7 +601,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"files": os.path.basename(remainder),
"stagingDir": os.path.dirname(remainder),
}
- representations.append(rep)
if "render" in instance.get("families"):
rep.update({
"fps": instance.get("fps"),
@@ -609,6 +608,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
})
self._solve_families(instance, True)
+ already_there = False
+ for repre in instance.get("representations", []):
+ # might be added explicitly before by publish_on_farm
+ already_there = repre.get("files") == rep["files"]
+ if already_there:
+ break
+ self.log.debug("repre {} already_there".format(repre))
+ if not already_there:
+ representations.append(rep)
+
return representations
def _solve_families(self, instance, preview=False):
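The guard above in miniature: append a new representation only when no existing entry shares the same "files" value.

    representations = [{"files": "shot010_burnin.mov"}]
    rep = {"files": "shot010_burnin.mov"}   # added earlier by publish_on_farm

    if not any(r.get("files") == rep["files"] for r in representations):
        representations.append(rep)

    print(len(representations))  # 1 -- the duplicate was skipped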
Add more description to policies in keypairs.py
This updates the policy doc for the server extend controller in keypairs.py
Partially implements blueprint policy-docs
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_policy import policy
-
from nova.policies import base
@@ -63,9 +61,20 @@ keypairs_policies = [
'method': 'GET'
}
]),
- policy.RuleDefault(
- name=BASE_POLICY_NAME,
- check_str=base.RULE_ADMIN_OR_OWNER),
+ base.create_rule_default(
+ BASE_POLICY_NAME,
+ base.RULE_ADMIN_OR_OWNER,
+ "Return 'key_name' in the response of server.",
+ [
+ {
+ 'path': '/servers/{id}',
+ 'method': 'GET',
+ },
+ {
+ 'path': '/servers/detail',
+ 'method': 'GET'
+ }
+ ]),
]
Fixed _custom_opac flag
If we specify opacity for every point, then we should set _custom_opac to true.
@@ -1484,7 +1484,7 @@ class BasePlotter(PickingHelper, WidgetHelper):
opacity = np.array(opacity)
if scalars.shape[0] == opacity.shape[0]:
# User could pass an array of opacities for every point/cell
- pass
+ _custom_opac = True
else:
opacity = opacity_transfer_function(opacity, n_colors)
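For context, a minimal pyvista-style sketch of the per-point opacity path exercised by this fix (the mesh and scalars are illustrative):

    import numpy as np
    import pyvista as pv

    mesh = pv.Sphere()
    scalars = mesh.points[:, 2]
    opacity = np.linspace(0.1, 1.0, mesh.n_points)  # one opacity value per point
    pl = pv.Plotter()
    pl.add_mesh(mesh, scalars=scalars, opacity=opacity)
    pl.show()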
Add warning about mounting relative paths
and minor tweaks
@@ -72,11 +72,16 @@ Running mriqc
automatically be executed without need of running the command in item 3.
+.. warning::
+
+ Paths `<bids_dir>` and `<output_dir>` must be absolute. In particular, specifying relative paths for
+ `<output_dir>` will generate no error and mriqc will run to completion without error but produce no output.
+
.. warning::
For security reasons, we recommend to run the docker command with the options
``--read-only --tmpfs /run --tmpfs /tmp``. This will run the docker image in
- read-only mode, and map the temporal folders ``/run`` and ``/tmp`` to the temporal
+ read-only mode, and map the temporary folders ``/run`` and ``/tmp`` to the temporal
folder of the host.
@@ -87,7 +92,7 @@ Explaining the mriqc-docker command line
Let's dissect this command line:
-+ :code:`docker run`- instructs the docker engine to get and run certain
++ :code:`docker run`- instructs the docker engine to get and run a certain
image (which is the last of docker-related arguments:
:code:`poldracklab/mriqc:latest`)
+ :code:`-v <bids_dir>:/data:ro` - instructs docker to mount the local
Cast regularization parameters to float.
This works around a bug in earlier proto versions
that automatically inferred these values to be integers
instead of floats.
DOC: Update TESTS.rst to use the correct names
Not actually sure that setup_module() is what was wanted here, but
it works?
Mention a bit more about actual pytest fixtures.
@@ -178,30 +178,33 @@ Similarly for methods::
Easier setup and teardown functions / methods
---------------------------------------------
-Testing looks for module-level or class-level setup and teardown functions by
-name; thus::
+Testing looks for module-level or class method-level setup and teardown
+functions by name; thus::
- def setup():
+ def setup_module():
"""Module-level setup"""
print('doing setup')
- def teardown():
+ def teardown_module():
"""Module-level teardown"""
print('doing teardown')
class TestMe:
- def setup():
+ def setup_method(self):
"""Class-level setup"""
print('doing setup')
- def teardown():
+ def teardown_method(self):
"""Class-level teardown"""
print('doing teardown')
Setup and teardown functions for functions and methods are known as "fixtures",
-and their use is not encouraged.
+and they should be used sparingly.
+``pytest`` supports more general fixtures at various scopes, which may be used
+automatically via special arguments. For example, the special argument name
+``tmpdir`` is used in tests to create a temporary directory.
Parametric tests
----------------
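A minimal sketch of the ``tmpdir`` fixture mentioned above (not part of the patch):

    def test_write(tmpdir):
        # pytest injects a unique temporary directory as a py.path.local object
        target = tmpdir.join("hello.txt")
        target.write("content")
        assert target.read() == "content"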
Optimize mesh export using np.fromiter.
Made optimization of mesh export using np.fromiter() instead of creating python lists of mesh data. In my tests it speeds up the export process more than 2 times, on some scenes more than 4 times.
@@ -43,24 +43,26 @@ class MeshData:
if tris_len == 0:
raise SyncError("Mesh %s has no polygons" % mesh.name, mesh)
- data.vertices = np.array([vert.co for vert in mesh.vertices], dtype=np.float32)
- data.normals = np.array(
- [norm for tri in mesh.loop_triangles
- for norm in tri.split_normals],
- dtype=np.float32
- )
+ data.vertices = np.fromiter(
+ (x for vert in mesh.vertices for x in vert.co),
+ dtype=np.float32).reshape((len(mesh.vertices), 3))
+ data.normals = np.fromiter(
+ (x for tri in mesh.loop_triangles for norm in tri.split_normals for x in norm),
+ dtype=np.float32).reshape((tris_len * 3, 3))
data.uvs = None
data.uv_indices = None
if len(mesh.uv_layers) > 0:
uv_layer = mesh.uv_layers.active
- uvs = np.array([[d.uv.x, d.uv.y] for d in uv_layer.data], dtype=np.float32)
+ uvs = np.fromiter(
+ (x for d in uv_layer.data for x in d.uv),
+ dtype=np.float32).reshape((len(uv_layer.data), 2))
if len(uvs) > 0:
data.uvs = uvs
- data.uv_indices = np.array([tri.loops for tri in mesh.loop_triangles], dtype=np.int32).reshape((tris_len * 3,))
+ data.uv_indices = np.fromiter((x for tri in mesh.loop_triangles for x in tri.loops), dtype=np.int32)
data.num_face_vertices = np.full((tris_len,), 3, dtype=np.int32)
- data.vertex_indices = np.array([tri.vertices for tri in mesh.loop_triangles], dtype=np.int32).reshape((tris_len * 3,))
+ data.vertex_indices = np.fromiter((x for tri in mesh.loop_triangles for x in tri.vertices), dtype=np.int32)
data.normal_indices = np.arange(tris_len * 3, dtype=np.int32)
if calc_area:
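A self-contained sketch of the idea behind the change (illustrative data, not scene code):

    import numpy as np

    verts = [(float(i), float(i) + 1.0, float(i) + 2.0) for i in range(1000)]

    # List-based construction materializes intermediate Python lists
    a = np.array(verts, dtype=np.float32)

    # np.fromiter() consumes a flat generator directly, skipping the intermediates
    b = np.fromiter((x for v in verts for x in v),
                    dtype=np.float32).reshape((len(verts), 3))

    assert np.array_equal(a, b)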
Add ContainerImagePrepare service to CellController role
The CellController role does not have the ContainerImagePrepare
service. This results in an empty external_deploy_steps_tasks.yaml
and container images not being updated when trying to update
the cell stack.
Closes-Bug:
Added SLIs, SLOs and Burn rate Alerts section
Documentation for the SLIs, SLOs and Burn rate Alerts section; pictures still need to be added.
Also fixed typos in slos.tf
# See the License for the specific language governing permissions and
# limitations under the License.
-# Create an SLO for availablity for the custom service.
+# Create an SLO for availability for the custom service.
# Example SLO is defined as following:
# 90% of all non-4XX requests within the past 30 day windowed period
# return with 200 OK status
@@ -89,7 +89,7 @@ resource "google_monitoring_slo" "custom_service_latency_slo" {
}
}
-# Create an SLO for availablity for the Istio service.
+# Create an SLO for availability for the Istio service.
# Example SLO is defined as following:
# 90% of all non-4XX requests within the past 30 day windowed period
# return with 200 OK status
@@ -249,7 +249,7 @@ resource "google_monitoring_slo" "rating_service_latency_slo" {
}
}
-# Rating data freshness SLO:
+# Rating service's data freshness SLO:
# during a day 99.9% of minutes have at least 1 successful recollect API call
resource "google_monitoring_slo" "rating_service_freshness_slo" {
# Uses ratingservice service that is automatically detected and created when the service is deployed to App Engine
generate_adhoc_ssl_pair: make issuer match subject
With this change, the generated certificate can be trusted,
and the following command starts working:
openssl s_client -showcerts -connect dev:443 -verifyCAfile dev.crt </dev/null
Do not fail if process already ended
We can expect the subprocess has already ended by the time we're
checking for child processes. Handle this case gracefully so that tests
do not fail with an exception.
@@ -1548,7 +1548,11 @@ def win32_kill_process_tree(pid, sig=signal.SIGTERM, include_parent=True,
'''
if pid == os.getpid():
raise RuntimeError("I refuse to kill myself")
+ try:
parent = psutil.Process(pid)
+ except psutil.NoSuchProcess:
+ log.debug("PID not found alive: %d", pid)
+ return ([], [])
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
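The pattern in isolation, as a hedged sketch (the helper name is hypothetical):

    import psutil

    def children_or_empty(pid):
        # The process may exit between spawning and inspection; treat as "no children"
        try:
            parent = psutil.Process(pid)
        except psutil.NoSuchProcess:
            return []
        return parent.children(recursive=True)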
Cell ID performance improvements
Determine up front whether the platform can store cell IDs in an array
and use pogo_async's new array storage feature. Don't cast between
sequence types, since pogo_async can handle each of them now. Only round
coordinates if caching IDs.
rpm: Properly detect other ARMv7 (32 Bit) arches
Like it is currently being done for the different
x86 arches (i386, i486, ...).
@@ -30,7 +30,16 @@ ARCHES_ALPHA = (
"alphaev68",
"alphaev7",
)
-ARCHES_ARM = ("armv5tel", "armv5tejl", "armv6l", "armv7l", "aarch64")
+ARCHES_ARM_32 = (
+ "armv5tel",
+ "armv5tejl",
+ "armv6l",
+ "armv6hl",
+ "armv7l",
+ "armv7hl",
+ "armv7hnl",
+)
+ARCHES_ARM_64 = ("aarch64",)
ARCHES_SH = ("sh3", "sh4", "sh4a")
ARCHES = (
@@ -39,7 +48,8 @@ ARCHES = (
+ ARCHES_PPC
+ ARCHES_S390
+ ARCHES_ALPHA
- + ARCHES_ARM
+ + ARCHES_ARM_32
+ + ARCHES_ARM_64
+ ARCHES_SH
)
@@ -66,11 +76,13 @@ def get_osarch():
def check_32(arch, osarch=None):
"""
- Returns True if both the OS arch and the passed arch are 32-bit
+ Returns True if both the OS arch and the passed arch are x86 or ARM 32-bit
"""
if osarch is None:
osarch = get_osarch()
- return all(x in ARCHES_32 for x in (osarch, arch))
+ return all(x in ARCHES_32 for x in (osarch, arch)) or all(
+ x in ARCHES_ARM_32 for x in (osarch, arch)
+ )
def pkginfo(name, version, arch, repoid, install_date=None, install_date_time_t=None):
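A reduced sketch of the patched check (the x86 tuple contents are assumed, not taken from the diff):

    ARCHES_32 = ("i386", "i486", "i586", "i686")
    ARCHES_ARM_32 = ("armv5tel", "armv5tejl", "armv6l", "armv6hl",
                     "armv7l", "armv7hl", "armv7hnl")

    def check_32(arch, osarch):
        # 32-bit only when both arches come from the same 32-bit family
        return all(x in ARCHES_32 for x in (osarch, arch)) or all(
            x in ARCHES_ARM_32 for x in (osarch, arch)
        )

    assert check_32("armv7hl", osarch="armv7l")     # both ARM 32-bit
    assert not check_32("armv7l", osarch="x86_64")  # mixed families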
TST: implemented testing utilities
Updated time unit tests to use the testing utilities.
Remove useless netaddr requirement
This patch cleans up the requirements.txt list to remove the netaddr
module, which has been replaced by oslo_utils.
@@ -8,7 +8,6 @@ automaton>=0.5.0 # Apache-2.0
eventlet!=0.18.3,>=0.18.2 # MIT
WebOb>=1.6.0 # MIT
greenlet>=0.3.2 # MIT
-netaddr!=0.7.16,>=0.7.13 # BSD
paramiko>=2.0 # LGPLv2.1+
python-neutronclient>=5.1.0 # Apache-2.0
python-glanceclient>=2.5.0 # Apache-2.0
Create compilation passes for ASTNode kinds and final structs processing
TN:
@@ -30,9 +30,7 @@ from mako.lookup import TemplateLookup
from langkit import caching, names, template_utils
from langkit.ada_api import AdaAPISettings
from langkit.c_api import CAPISettings
-from langkit.diagnostics import (
- Severity, check_source_language, errors_checkpoint
-)
+from langkit.diagnostics import Severity, check_source_language
import langkit.documentation
from langkit.expressions import PropertyDef
from langkit.passes import (
@@ -948,31 +946,21 @@ class CompileCtx(object):
GlobalPass('annotate fields types',
CompileCtx.annotate_fields_types,
disabled=not annotate_fields_types),
+ GlobalPass('compute ASTNode kind constants',
+ CompileCtx.compute_node_kind_constants),
+
+ # Now that all Struct subclasses referenced by the grammar have
+ # been typed, iterate over all declared subclasses to register the
+ # ones that are unreachable from the grammar. TODO: this kludge
+ # will eventually disappear as part of OC22-016.
+ GlobalPass('add structs to context',
+ CompileCtx.add_structs_to_context),
+ errors_checkpoint_pass,
)
with names.camel_with_underscores:
pass_manager.run(self)
- for i, astnode in enumerate(
- (astnode
- for astnode in self.astnode_types
- if not astnode.abstract),
- # Compute kind constants for all ASTNode concrete subclasses.
- # Start with 1: the constant 0 is reserved as an
- # error/uninitialized code.
- start=1
- ):
- self.node_kind_constants[astnode] = i
-
- # Now that all Struct subclasses referenced by the grammar have been
- # typed, iterate over all declared subclasses to register the ones that
- # are unreachable from the grammar. TODO: this kludge will eventually
- # disappear as part of OC22-016.
- for t in self.struct_types + self.astnode_types:
- t.add_to_context()
-
- errors_checkpoint()
-
def _emit(self, file_root, generate_lexer, main_source_dirs,
main_programs):
"""
@@ -1300,3 +1288,25 @@ class CompileCtx(object):
["-f", "annotate_fields_types",
"--no-diff", "-w"] + list(astnodes_files)
)
+
+ def compute_node_kind_constants(self):
+ """
+ Compute kind constants for all ASTNode concrete subclasses.
+ """
+ for i, astnode in enumerate(
+ (astnode
+ for astnode in self.astnode_types
+ if not astnode.abstract),
+ # Start with 1: the constant 0 is reserved as an
+ # error/uninitialized code.
+ start=1
+ ):
+ self.node_kind_constants[astnode] = i
+
+ def add_structs_to_context(self):
+ """
+ Make sure all Struct subclasses (including ASTNode ones) are added to
+ the context.
+ """
+ for t in self.struct_types + self.astnode_types:
+ t.add_to_context()
Association connect should not blindly assume memberEnds
In the rare case memberEnd instances are missing, we should just
do nothing.
@@ -79,13 +79,16 @@ class AssociationConnect(RelationshipConnect):
subject = line.subject
def member_ends_match(subject):
- return (
+ return len(subject.memberEnd) >= 2 and (
+ (
head_subject is subject.memberEnd[0].type
and tail_subject is subject.memberEnd[1].type
- ) or (
+ )
+ or (
head_subject is subject.memberEnd[1].type
and tail_subject is subject.memberEnd[0].type
)
+ )
# First check if the right subject is already connected:
if line.subject and member_ends_match(line.subject):
Using snapshot alf/examples
When playing a trained model with an alf snapshot, we should also redirect the python path to its examples directory in case some conf files have been changed.
feat: archiving pipelines
$feat: add archive jobs BE integration
$feat: add tests for archive jobs button
@@ -3,6 +3,7 @@ from dbnd._vendor.marshmallow import fields, validate
class JobSchemaV2(ApiObjectSchema):
+ id = fields.Int()
name = fields.Str()
user = fields.Str()
ui_hidden = fields.Boolean()
For the NotificationWithTemplateSchema, exclude scheduled_notifications so we do not query that table.
The scheduled_notifications table is not used as of yet.
@@ -449,7 +449,7 @@ class NotificationWithTemplateSchema(BaseSchema):
class Meta:
model = models.Notification
strict = True
- exclude = ('_personalisation', )
+ exclude = ('_personalisation', 'scheduled_notification')
template = fields.Nested(
TemplateSchema,
Update facades for 2.9 release
This updates the facades to prevent spurious warnings about
missing facades. Although the warnings are logged, nothing has been coded
against these facades, so we can safely add them without any consequence.
Only configure flint2 once
If we've run configure before and a Makefile exists, let make figure out whether a recompile of flint2 is necessary
@@ -8,7 +8,9 @@ pip install -r requirements.txt
# Check for git clone of flint2 on MacOS and install if found
if [ -f flint2/configure ]; then
cd flint2/
+ if [ ! -f Makefile ]; then
./configure
+ fi
make -j4
make install
cd ../
polys: avoid unnecessarily using numbered_symbols() in primitive_element()
Also drop redundant polys option
@@ -674,14 +674,14 @@ def primitive_element(extension, **args):
x = Dummy('x')
domain = args.get('domain', QQ)
- F, Y = zip(*[(minimal_polynomial(e, domain=domain).replace(y), y)
- for e, y in zip(extension, numbered_symbols('y', cls=Dummy))])
+ F = [minimal_polynomial(e, domain=domain) for e in extension]
+ Y = [p.gen for p in F]
for u in range(1, (len(F) - 1)*prod(f.degree() for f in F) + 1):
coeffs = [u**n for n in range(len(Y))]
f = x - sum(c*y for c, y in zip(coeffs, Y))
- *H, g = groebner(F + (f,), Y + (x,), domain=domain, polys=True)
+ *H, g = groebner(F + [f], Y + [x], domain=domain)
for i, (h, y) in enumerate(zip(H, Y)):
H[i] = (y - h).eject(*Y).retract(field=True)
@@ -15,6 +15,8 @@ from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "Eltex.LTE"
pattern_username = r"(?<!Last )login: "
+ username_submit = "\r"
+ password_submit = "\r"
pattern_more = [
(r"\[Yes/press any key for no\]", "Y")
]
remove overridden __init__
Overriding __init__ is not necessary.
@@ -36,9 +36,6 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
class PhotoshopHost(HostBase, IWorkfileHost, ILoadHost):
name = "photoshop"
- def __init__(self):
- super(PhotoshopHost, self).__init__()
-
def install(self):
"""Install Photoshop-specific functionality of avalon-core.
Fix resolving against multiple markers
Fix for when requirements are also present
Fixes
@@ -310,7 +310,9 @@ class Resolver(object):
for dependency_string in dependency_strings:
try:
- individual_dependencies = [dep.strip() for dep in dependency_string.split(', ')]
+ split_deps = dependency_string.split(';')
+ dependencies, markers = split_deps[0], '; '.join(list(set([marker.strip() for marker in split_deps[1:]])))
+ individual_dependencies = [dep.strip() for dep in dependencies.split(', ')]
cleaned_deps = []
for dep in individual_dependencies:
tokens = [token.strip() for token in dep.split(';')]
@@ -325,6 +327,7 @@ class Resolver(object):
cleaned_tokens.extend(markers)
cleaned_deps.append('; '.join(cleaned_tokens))
_dependency_string = ', '.join(set(cleaned_deps))
+ _dependency_string += '; {0}'.format(markers)
yield InstallRequirement.from_line(_dependency_string, constraint=ireq.constraint)
except InvalidMarker:
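The splitting logic in isolation, with an illustrative dependency string:

    dependency_string = "six; python_version < '3.0'; python_version < '3.0'"
    split_deps = dependency_string.split(';')
    dependencies = split_deps[0]
    # duplicate markers collapse through the set()
    markers = '; '.join(set(m.strip() for m in split_deps[1:]))
    individual_dependencies = [d.strip() for d in dependencies.split(', ')]
    print(individual_dependencies)  # ['six']
    print(markers)                  # python_version < '3.0'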
fix: `set_column_disp` contradicts arguments
`show` should set `hidden` as 0, but does the opposite. This is fixed.
Use `Array.isArray()` instead of deprecated usage
@@ -501,9 +501,9 @@ export default class Grid {
}
set_column_disp(fieldname, show) {
- if ($.isArray(fieldname)) {
+ if (Array.isArray(fieldname)) {
for (let field of fieldname) {
- this.update_docfield_property(field, "hidden", show);
+ this.update_docfield_property(field, "hidden", show ? 0 : 1);
this.set_editable_grid_column_disp(field, show);
}
} else {
docs: Updated quickstart docs to import FeatureService
@@ -82,7 +82,7 @@ online_store:
from datetime import timedelta
-from feast import Entity, FeatureView, Field, FileSource, ValueType
+from feast import Entity, FeatureService, FeatureView, Field, FileSource, ValueType
from feast.types import Float32, Int64
# Read data from parquet files. Parquet is convenient for local development mode. For
Fix computed getter
Content defaults are saved in `diffTracker.contentDefaults`,
not in `diffTracker.content_defaults`
Prioritize `diffTracker`'s values over `channel.content_defaults`
as diffTracker contains the latest updates
[hailtop] use the exact same error message for sync and async
* [hailtop] use the exact same error message for sync and async
Importantly, I want to see the stack trace in either case.
* revert
@@ -629,7 +629,7 @@ async def retry_transient_errors(f: Callable[..., Awaitable[T]], *args, **kwargs
errors += 1
if errors % 10 == 0:
st = ''.join(traceback.format_stack())
- log.warning(f'encountered {errors} errors. My stack trace is {st}. Most recent error was {e}', exc_info=True)
+ log.warning(f'Encountered {errors} errors. My stack trace is {st}. Most recent error was {e}', exc_info=True)
delay = await sleep_and_backoff(delay)
@@ -642,7 +642,8 @@ def sync_retry_transient_errors(f, *args, **kwargs):
except Exception as e:
errors += 1
if errors % 10 == 0:
- log.warning(f'encountered {errors} errors, most recent one was {e}', exc_info=True)
+ st = ''.join(traceback.format_stack())
+ log.warning(f'Encountered {errors} errors. My stack trace is {st}. Most recent error was {e}', exc_info=True)
if is_transient_error(e):
pass
else:
NanoRange: Fixed bugs from comments
Changed the way MSVC is handled,
leaving it open for handling version checks
Removed the version number in the conanfile.py
Updated the copy logic from working with .zip to .tar.gz
@@ -6,7 +6,6 @@ from conans.errors import ConanInvalidConfiguration
class NanorangeConan(ConanFile):
name = "nanorange"
- version = "20191001"
license = "Boost 1.0"
author = "Paul M. Bendixen [email protected]"
url = "github.com/conan-io/conan-center-index"
@@ -18,6 +17,10 @@ class NanorangeConan(ConanFile):
# No settings/options are necessary, this is header only
def configure(self):
+ if self.settings.compiler == "Visual Studio":
+ if not any([self.settings.compiler.cppstd == std for std in ["17", "20"]]):
+ raise ConanInvalidConfiguration("nanoRange requires at least c++17")
+ else:
if not any([str(self.settings.compiler.cppstd) == std for std in ["17", "20", "gnu17", "gnu20"]]):
raise ConanInvalidConfiguration("nanoRange requires at least c++17")
@@ -25,7 +28,7 @@ class NanorangeConan(ConanFile):
tools.get(**self.conan_data["sources"][self.version])
def package(self):
- sourceSubfolder="NanoRange-{}".format( self.conan_data["sources"][self.version]["url"].split("/")[-1][:-4])
+ sourceSubfolder="NanoRange-{}".format( self.conan_data["sources"][self.version]["url"].split("/")[-1][:-7])
self.copy("*.hpp", src="{}/include".format(sourceSubfolder), dst="include" )
self.copy("LICENSE_1_0.txt", src=sourceSubfolder, dst="licenses")
Added missing component of cmac to save file
_n was not saved in the save file
Add pre-conditions to avoid on_timeout being called after stop()
Apparently the cancellation request for a TimerHandle doesn't
necessarily have to be honoured despite large periods of time passing
@@ -314,7 +314,9 @@ class View:
self._timeout_handler = loop.call_later(self.timeout, self.dispatch_timeout)
def dispatch_timeout(self):
- if not self._stopped.done():
+ if self._stopped.done():
+ return
+
self._stopped.set_result(True)
asyncio.create_task(self.on_timeout(), name=f'discord-ui-view-timeout-{self.id}')
change add_outgrads and primitive_mut_add to do the vspace.zeros()
initialization inside primitive_mut_add
Compare the bytes we read with a bytes object, not str.
Fixes
@@ -23,7 +23,7 @@ def print_unsourced_ids_from_wikipedia():
for page_id, type in cursor:
if type == b'page':
print(page_id)
- elif type == 'subcat':
+ elif type == b'subcat':
subcategories.add(page_id)
if not subcategories:
break
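The underlying Python 3 behaviour, in two lines:

    row_type = b'subcat'
    assert row_type != 'subcat'    # bytes and str never compare equal in Python 3
    assert row_type == b'subcat'   # compare like with like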
improved export file
Export the original file, or export the file created within the sqlite database.
@@ -1122,7 +1122,7 @@ class DialogManageFiles(QtWidgets.QDialog):
def export(self):
""" Export files to selected directory.
If an imported file was from a docx, odt, pdf, html, epub then export the original file
- and also export the plain text version.
+ If the file was created within QualCoder (so only in the database), export as plain text.
"""
index_list = self.ui.tableWidget.selectionModel().selectedIndexes()
@@ -1145,15 +1145,15 @@ class DialogManageFiles(QtWidgets.QDialog):
if not ok:
return
- # redo ms as filenames may change for created files and for original file documents
+ # redo msg as filenames may change for created files and for original file documents
msg = _("Export to ") + directory + "\n"
for row in rows:
filename = self.source[row]['name']
filename_txt = None
- if len(filename) > 5 and (filename[-5:] == ".html" or filename[-5:] == ".docx" or filename[-5:] == ".epub"):
+ '''if len(filename) > 5 and (filename[-5:] == ".html" or filename[-5:] == ".docx" or filename[-5:] == ".epub"):
filename_txt = filename[0:len(filename) - 5] + ".txt"
if len(filename) > 4 and (filename[-4:] == ".htm" or filename[-4:] == ".odt" or filename[-4] == ".txt"):
- filename_txt = filename[0:len(filename) - 4] + ".txt"
+ filename_txt = filename[0:len(filename) - 4] + ".txt" '''
# Below is for transcribed files and for user created text files within QualCoder
if self.source[row]['mediapath'] is None and filename_txt is None:
filename_txt = filename + ".txt"
Classes for extensions
Added for both extensions and lnfaucet db
[bugfix] Fix _formatLimit_MonthOfYear
Limit is given as 1900 but not recognized by predicate
@@ -2152,7 +2152,7 @@ formatLimits = {
}
# All month of year articles are in the same format
-_formatLimit_MonthOfYear = (lambda v: 1 <= 1900 and v < 2051, 1900, 2051)
+_formatLimit_MonthOfYear = (lambda v: 1900 <= v < 2051, 1900, 2051)
for month in yrMnthFmts:
formatLimits[month] = _formatLimit_MonthOfYear
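Why the old predicate was wrong, as a sketch:

    old = lambda v: 1 <= 1900 and v < 2051   # 1 <= 1900 is always True, so this reduces to v < 2051
    new = lambda v: 1900 <= v < 2051

    assert old(1805) is True    # the bug: years before 1900 pass
    assert new(1805) is False
    assert new(1999) is True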
test: Misc update in test_tutorial
Add missing remove_target call from "Delegate to Hashed Bins"
section
Add comments to dirty_roles output assertion
@@ -134,7 +134,8 @@ class TestTutorial(unittest.TestCase):
repository.root.load_signing_key(private_root_key)
repository.root.load_signing_key(private_root_key2)
- # Patch logger to assert that it accurately logs dirty roles
+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and
+ # #958. We still call it here to see if roles are dirty as expected.
with mock.patch("tuf.repository_tool.logger") as mock_logger:
repository.dirty_roles()
# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
@@ -186,6 +187,8 @@ class TestTutorial(unittest.TestCase):
repository.timestamp.expiration = datetime.datetime(2080, 10, 28, 12, 8)
+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and
+ # #958. We still call it here to see if roles are dirty as expected.
with mock.patch("tuf.repository_tool.logger") as mock_logger:
repository.dirty_roles()
# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
@@ -265,7 +268,8 @@ class TestTutorial(unittest.TestCase):
'timestamp_key', 'password')
repository.timestamp.load_signing_key(private_timestamp_key)
- # Patch logger to assert that it accurately logs dirty roles
+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and
+ # #958. We still call it here to see if roles are dirty as expected.
with mock.patch("tuf.repository_tool.logger") as mock_logger:
repository.dirty_roles()
# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
@@ -278,6 +282,8 @@ class TestTutorial(unittest.TestCase):
self.assertTrue(os.path.exists(os.path.join(
'repository','targets', 'myproject', 'file4.txt')))
+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and
+ # #958. We still call it here to see if roles are dirty as expected.
with mock.patch("tuf.repository_tool.logger") as mock_logger:
repository.dirty_roles()
# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
@@ -318,7 +324,8 @@ class TestTutorial(unittest.TestCase):
'unclaimed_key', 'password')
repository.targets("unclaimed").load_signing_key(private_unclaimed_key)
-
+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and
+ # #958. We still call it here to see if roles are dirty as expected.
with mock.patch("tuf.repository_tool.logger") as mock_logger:
repository.dirty_roles()
# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
@@ -337,6 +344,7 @@ class TestTutorial(unittest.TestCase):
# ----- Tutorial Section: Delegate to Hashed Bins
+ repository.targets('unclaimed').remove_target("myproject/file4.txt")
targets = repository.get_filepaths_in_directory(
os.path.join('repository', 'targets', 'myproject'), recursive_walk=True)
@@ -362,10 +370,11 @@ class TestTutorial(unittest.TestCase):
])
-
for delegation in repository.targets('unclaimed').delegations:
delegation.load_signing_key(private_unclaimed_key)
+ # NOTE: The tutorial does not call dirty_roles anymore due to #964 and
+ # #958. We still call it here to see if roles are dirty as expected.
with mock.patch("tuf.repository_tool.logger") as mock_logger:
repository.dirty_roles()
# Concat strings to avoid Python2/3 unicode prefix problems ('' vs. u'')
Fix 'navigation_depth' functionality
Read the Docs was not using the sphinx_rtd_theme settings due to
clobbering the configuration dictionary; tweaked conf.py to avoid this.
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
+import importlib
import os
import warnings
# import sys
@@ -61,12 +62,9 @@ warnings.filterwarnings("ignore", category=UserWarning,
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-on_rtd = os.environ.get('READTHEDOCS') == 'True'
-if on_rtd:
- html_theme = 'default'
-else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
+html_style = None
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
Enable tuples and lists in handle_probability_param()
handle_probability_param() in parameters.py has so
far only supported single numbers, True, False and
StochasticParameter. Now it also supports tuples
of form (a, b), which are transformed to Uniform
and lists of form [a, b, c, ...], which are
transformed to Choice. These are useful for masks.
@@ -105,7 +105,7 @@ def handle_discrete_param(param, name, value_range=None, tuple_to_uniform=True,
list_str = ", list of %s" % (allowed_type,) if list_to_choice else ""
raise Exception("Expected %s, tuple of two %s%s or StochasticParameter for %s, got %s." % (allowed_type, allowed_type, list_str, name, type(param),))
-def handle_probability_param(param, name):
+def handle_probability_param(param, name, tuple_to_uniform=False, list_to_choice=False):
eps = 1e-6
if param in [True, False, 0, 1]:
return Deterministic(int(param))
@@ -115,6 +115,20 @@ def handle_probability_param(param, name):
return Deterministic(int(round(param)))
else:
return Binomial(param)
+ elif tuple_to_uniform and isinstance(param, tuple):
+ ia.do_assert(all([
+ ia.is_single_number(v) for v in param
+ ]), "Expected parameter '%s' of type tuple to only contain number, got %s." % (name, [type(v) for v in param],))
+ ia.do_assert(len(param) == 2)
+ ia.do_assert(0 <= param[0] <= 1.0)
+ ia.do_assert(0 <= param[1] <= 1.0)
+ return Binomial(Uniform(param[0], param[1]))
+ elif list_to_choice and ia.is_iterable(param):
+ ia.do_assert(all([
+ ia.is_single_number(v) for v in param
+ ]), "Expected iterable parameter '%s' to only contain number, got %s." % (name, [type(v) for v in param],))
+ ia.do_assert(all([0 <= p_i <= 1.0 for p_i in param]))
+ return Binomial(Choice(param))
elif isinstance(param, StochasticParameter):
return param
else:
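A hedged usage sketch of the extended signature (the return values paraphrase the branches above):

    p1 = handle_probability_param((0.2, 0.8), "p", tuple_to_uniform=True)
    # -> Binomial(Uniform(0.2, 0.8)): success probability drawn anew per call
    p2 = handle_probability_param([0.1, 0.5, 0.9], "p", list_to_choice=True)
    # -> Binomial(Choice([0.1, 0.5, 0.9])): probability sampled from the list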
GDB helpers: emit bind directives for BindingScope
TN:
Deletion: remove outer try/except block in reaper run_once
run_daemon already takes care of catching unhandled exceptions
and re-trying the run_once function.
@@ -463,7 +463,7 @@ def _run_once(rses, include_rses, exclude_rses, vos, chunk_size, greedy, scheme,
if not rses_to_process:
logger(logging.ERROR, 'Reaper: No RSEs found. Will sleep for 30 seconds')
return
- try:
+
dict_rses = {}
_, total_workers, logger = heartbeat_handler.live()
tot_needed_free_space = 0
@@ -601,11 +601,6 @@ def _run_once(rses, include_rses, exclude_rses, vos, chunk_size, greedy, scheme,
if paused_rses:
logger(logging.INFO, 'Deletion paused for a while for following RSEs: %s', ', '.join(paused_rses))
- except DatabaseException as error:
- logger(logging.WARNING, 'Reaper: %s', str(error))
- except Exception:
- logger(logging.CRITICAL, 'Exception', exc_info=True)
-
def stop(signum=None, frame=None):
"""
@@ -6,7 +6,7 @@ package ${ada_lib_name}.Introspection is
Invalid_Field : exception;
- ## In a lot of testcases, there is a single concrete AST node that has no
+ ## In a lot of testcases, there is a single concrete node that has no
## field. For these, generates a type that has no valid value.
type Field_Reference is
% if ctx.sorted_parse_fields:
@@ -16,25 +16,26 @@ package ${ada_lib_name}.Introspection is
new Integer range 1 .. 0
% endif
;
- -- Enumeration of all AST node fields
+ -- Enumeration of all node fields
function Field_Name (Field : Field_Reference) return String;
-- Return a lower-case name for Field
function Index (Field : Field_Reference) return Positive;
- -- Return the index in AST nodes to access the given Field
+ -- Return the index in nodes to access the given ``Field``
function Field_Reference_From_Index
(Kind : ${root_node_kind_name}; Index : Positive) return Field_Reference;
- -- Return the field reference corresponding to the given Index in AST nodes
- -- of the given Kind. Raise an Invalid_Field if there is no field
- -- corresponding to this index.
+ -- Return the field reference corresponding to the given ``Index`` in nodes
+ -- of the given ``Kind``. Raise an ``Invalid_Field`` exception if there is
+ -- no field corresponding to this index.
type Field_Reference_Array is array (Positive range <>) of Field_Reference;
function Fields
(Kind : ${root_node_kind_name}) return Field_Reference_Array;
- -- Return the list of fields that AST nodes of the given Kind have
+ -- Return the list of fields that nodes of the given ``Kind`` have. This
+ -- returns an empty array for list nodes.
function Token_Node_Kind
(Kind : ${root_node_kind_name}) return Token_Kind
@@ -42,7 +43,7 @@ package ${ada_lib_name}.Introspection is
-- Return the token kind corresponding to the given token node kind
% if not ctx.generate_unparser:
--
- -- As unparser are not generated, this always raises a Program_Error
+ -- As unparser are not generated, this always raises a ``Program_Error``
-- exception.
% endif
Typo ?
I removed "-e" option from "pip install -e dist/*.whl # installs jaxlib (includes XLA)" line 58. It is now coherent with lines 69-70.
When I tried the command with the "-e" it threw an error, without "-e" it worked fine.
@@ -55,7 +55,7 @@ You can install the necessary Python dependencies using ``pip``::
To build ``jaxlib`` with CUDA support, you can run::
python build/build.py --enable_cuda
- pip install -e dist/*.whl # installs jaxlib (includes XLA)
+ pip install dist/*.whl # installs jaxlib (includes XLA)
See ``python build/build.py --help`` for configuration options, including ways to
Fix argument parsing in RandomGhosting
Fixes
@@ -50,22 +50,40 @@ class RandomGhosting(RandomTransform):
if axis not in (0, 1, 2):
raise ValueError(f'Axes must be in (0, 1, 2), not "{axes}"')
self.axes = axes
- if isinstance(num_ghosts, int):
- self.num_ghosts_range = num_ghosts, num_ghosts
- elif isinstance(num_ghosts, tuple) and len(num_ghosts) == 2:
- self.num_ghosts_range = num_ghosts
- self.intensity_range = self.parse_range(intensity, 'intensity')
- for n in self.intensity_range:
- if n < 0:
- message = (
- f'Intensity must be a positive number, not {n}')
- raise ValueError(message)
+ self.num_ghosts_range = self.parse_num_ghosts(num_ghosts)
+ self.intensity_range = self.parse_intensity(intensity)
if not 0 <= restore < 1:
message = (
f'Restore must be a number between 0 and 1, not {restore}')
raise ValueError(message)
self.restore = restore
+ @staticmethod
+ def parse_num_ghosts(num_ghosts):
+ try:
+ iter(num_ghosts)
+ except TypeError:
+ num_ghosts = num_ghosts, num_ghosts
+ for n in num_ghosts:
+ if not isinstance(n, int) or n < 0:
+ message = (
+ f'Number of ghosts must be a natural number, not {n}')
+ raise ValueError(message)
+ return num_ghosts
+
+ @staticmethod
+ def parse_intensity(intensity):
+ try:
+ iter(intensity)
+ except TypeError:
+ intensity = intensity, intensity
+ for n in intensity:
+ if n < 0:
+ message = (
+ f'Intensity must be a positive number, not {n}')
+ raise ValueError(message)
+ return intensity
+
def apply_transform(self, sample: Subject) -> dict:
random_parameters_images_dict = {}
for image_name, image_dict in sample.get_images_dict().items():
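The iter()-based duck typing used by both parsers, reduced to a standalone sketch (the helper name is hypothetical):

    def as_pair(value):
        try:
            iter(value)
        except TypeError:
            value = value, value   # scalar: promote to a degenerate range
        return tuple(value)

    assert as_pair(4) == (4, 4)
    assert as_pair((2, 7)) == (2, 7)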
Fix ToTensor when PIL Image has mode F
Fixes
The only case of floating point supported by PIL seems to be `F`, so this should fix it.
Remove unused variable
My editor keeps moaning about it.
@@ -164,7 +164,6 @@ def measure_by_ccg(request, format=None):
org_ids = utils.param_to_list(request.query_params.get('org', []))
tags = [x for x in request.query_params.get('tags', '').split(',') if x]
- rolled = {}
measure_values = MeasureValue.objects.by_ccg(org_ids, measure_id, tags)
rsp_data = {
Extend the incremental marker for parametrize
The incremental marker is adapted to properly handle test classes with parametrize defined at class level.
Fix
@@ -461,21 +461,49 @@ an ``incremental`` marker which is to be used on classes:
# content of conftest.py
import pytest
+ from typing import Dict, Tuple
+ # store history of failures per test class name and per index in parametrize (if parametrize used)
+ _test_failed_incremental: Dict[str, Dict[Tuple[int, ...], str]] = {}
def pytest_runtest_makereport(item, call):
if "incremental" in item.keywords:
+ # incremental marker is used
if call.excinfo is not None:
- parent = item.parent
- parent._previousfailed = item
+ # the test has failed
+ # retrieve the class name of the test
+ cls_name = str(item.cls)
+ # retrieve the index of the test (if parametrize is used in combination with incremental)
+ parametrize_index = (
+ tuple(item.callspec.indices.values())
+ if hasattr(item, "callspec")
+ else ()
+ )
+ # retrieve the name of the test function
+ test_name = item.originalname or item.name
+ # store in _test_failed_incremental the original name of the failed test
+ _test_failed_incremental.setdefault(cls_name, {}).setdefault(
+ parametrize_index, test_name
+ )
def pytest_runtest_setup(item):
if "incremental" in item.keywords:
- previousfailed = getattr(item.parent, "_previousfailed", None)
- if previousfailed is not None:
- pytest.xfail("previous test failed ({})".format(previousfailed.name))
+ # retrieve the class name of the test
+ cls_name = str(item.cls)
+ # check if a previous test has failed for this class
+ if cls_name in _test_failed_incremental:
+ # retrieve the index of the test (if parametrize is used in combination with incremental)
+ parametrize_index = (
+ tuple(item.callspec.indices.values())
+ if hasattr(item, "callspec")
+ else ()
+ )
+ # retrieve the name of the first test function to fail for this class name and index
+ test_name = _test_failed_incremental[cls_name].get(parametrize_index, None)
+ # if name found, test has failed for the combination of class name & test name
+ if test_name is not None:
+ pytest.xfail("previous test failed ({})".format(test_name))
+
These two hook implementations work together to abort incremental-marked
tests in a class. Here is a test module example:
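The referenced test module is not included in this excerpt; a plausible sketch in the spirit of the pytest docs:

    import pytest

    @pytest.mark.incremental
    class TestUserHandling:
        def test_login(self):
            pass

        def test_modification(self):
            assert 0   # fails here ...

        def test_deletion(self):
            pass       # ... so this xfails as "previous test failed"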
add Namespace.add_field
This patch adds the add_field method to namespace v2 to facilitate the creation
of fields that have the same name as their argument.
@@ -709,6 +709,24 @@ class Namespace:
raise ValueError('Cannot define the jacobian {!r}: dimension is negative.'.format(jacobian))
setattr(self, jacobian, function.jacobian(geom, numpy.size(geom) - i))
+ def add_field(self, __names: Union[str, Sequence[str]], *__bases, shape: Tuple[int, ...] = (), dtype: function.DType = float):
+ '''Add field(s) of the form ns.u = function.dotarg('u', ...)
+
+ Parameters
+ ----------
+ names : :class:`str` or iterable thereof
+ Name of both the generated field and the function argument.
+ bases : :class:`nutils.function.Array` or something that can be :meth:`nutils.function.Array.cast` into one
+ The arrays to take inner products with.
+ shape : :class:`tuple` of :class:`int`, optional
+ The shape to be appended to the argument.
+ dtype : :class:`bool`, :class:`int`, :class:`float` or :class:`complex`
+ The dtype of the argument.
+ '''
+
+ for name in (__names,) if isinstance(__names, str) else __names:
+ setattr(self, name, function.dotarg(name, *__bases, shape=shape, dtype=dtype))
+
def copy_(self, **replacements: Mapping[str, function.Array]) -> 'Namespace':
'''Return a copy of this namespace.
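A hedged usage sketch (a nutils topology/basis named ``topo`` is assumed to exist):

    ns = Namespace()
    ns.basis = topo.basis('std', degree=1)
    ns.add_field(('u', 'v'), ns.basis)       # ns.u = function.dotarg('u', ns.basis), etc.
    ns.add_field('p', ns.basis, shape=(2,))  # vector-valued argument 'p'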
Missing Sample
Updated to show missing sample text for a response.
@@ -144,11 +144,12 @@ General Issues
2. If an issue has a Jira ticket with a ``help-wanted`` label, there is a Help Wanted ticket in GitHub. It can be closed with the following note:
.. code-block:: text
+
Hi @username
Thanks for the report! We have created a [Help Wanted issue here](link to GitHub issue) and are looking for community's help. Would you be interested helping with a pull request?
-3. If an issue has a Jira ticket without a ``help-wanted`` label and assigned to the current release fix version for a developer to fix, it can be closed with the following note
+3. If an issue has a Jira ticket without a ``help-wanted`` label and assigned to the current release fix version for a developer to fix, it can be closed with the following note:
.. code-block:: text
@@ -171,7 +172,7 @@ General Issues
Feature Requests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Respond to the issue with the following note
+Respond to the issue with the following note:
.. code-block:: text
Attempt to fix pipe_to unit test on Windows (for real this time)
The previous fix was apparently broken when I checked in with Linux line endings. This approach should be independent of that.
@@ -406,13 +406,10 @@ def test_pipe_to_shell(base_app):
# Windows
# Get help menu and pipe it's output to the sort shell command
out = run_cmd(base_app, 'help | sort')
- expected = normalize("""
-
-
-_relative_load edit history pause pyscript run set shortcuts
-========================================
-cmdenvironment help load py quit save shell show
-Documented commands (type help <topic>):""")
+ expected = ['', '', '_relative_load edit history pause pyscript run set shortcuts',
+ '========================================',
+ 'cmdenvironment help load py quit save shell show',
+ 'Documented commands (type help <topic>):']
assert out == expected
else:
# Mac and Linux
Update Task API
Including ended_at in datetime_fields
Removing unnecessary DateTimeFilters
Update CONTRIBUTING.md
Update the contributing instructions to use python-poetry instead of sdispater as the repository namespace.
@@ -87,7 +87,7 @@ You will need Poetry to start contributing on the Poetry codebase. Refer to the
You will first need to clone the repository using `git` and place yourself in its directory:
```bash
-$ git clone [email protected]:sdispater/poetry.git
+$ git clone [email protected]:python-poetry/poetry.git
$ cd poetry
```
Fix handling of the ZFIT_DISABLE_TF_WARNINGS environment variable.
The logic in _maybe_disable_warnings() did not actually do what
the warning about the suppression of TensorFlow warnings claimed.
Setting the environment variable had no effect.
Also slightly simplified the wording of the warning.
"""Top-level package for zfit."""
# Copyright (c) 2021 zfit
-import inspect
-import sys
import warnings
from pkg_resources import get_distribution
@@ -32,15 +30,16 @@ __all__ = ["z", "constraint", "pdf", "minimize", "loss", "core", "data", "func",
def _maybe_disable_warnings():
import os
- true = "IS_TRUE"
- if not os.environ.get("ZFIT_DISABLE_TF_WARNINGS", true):
- return
- elif true:
- warnings.warn("All TensorFlow warnings are by default suppressed by zfit."
- " In order to not suppress them,"
- " set the environment variable ZFIT_DISABLE_TF_WARNINGS to 0."
+ disable_warnings = os.environ.get("ZFIT_DISABLE_TF_WARNINGS")
+ if disable_warnings is None:
+ warnings.warn("TensorFlow warnings are by default suppressed by zfit."
+ " In order to show them,"
+ " set the environment variable ZFIT_DISABLE_TF_WARNINGS=0."
" In order to suppress the TensorFlow warnings AND this warning,"
- " set ZFIT_DISABLE_TF_WARNINGS manually to 1.")
+ " set ZFIT_DISABLE_TF_WARNINGS=1.")
+ elif disable_warnings == '0':
+ return
+
os.environ["KMP_AFFINITY"] = "noverbose"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
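A usage sketch of the fixed behaviour; the variable must be set before zfit is imported:

    import os
    os.environ["ZFIT_DISABLE_TF_WARNINGS"] = "1"   # silence TF warnings and the nag message
    # os.environ["ZFIT_DISABLE_TF_WARNINGS"] = "0" # keep TF warnings visible
    import zfit  # noqa: E402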
Add flag to disable reservation cleanup
This shouldn't be needed on our patched version of k8s that doesn't send
offers to maint'd hosts. This adds a flag so I can disable it in the
cron job that cleans up maint'd hosts.
[modules/spotify] enable scrolling
this change should enable scrolling for the spotify module
(unfortunately, i am unable to fully test this, as i am not using
spotify)
fixes
Fix "platform_adaptation" documentation test on windows
For reasons I don't full understand, including "windows.h" seems to break everything. There's an alternative sleep function in stdlib.h so I've used that instead since it makes the point just as well.
Fix typos
Fixed minor typos - Azaras to Azara's / Ruis' to Rui's
@@ -89,7 +89,7 @@ True
## 4. Combine matched records
-Implement the `create_record()` function that takes a `(treasure, coordinate)` pair from Azaras list and a `(location, coordinate, quadrant)` record from Ruis' list and returns `(treasure, coordinate, location, coordinate, quadrant)` **if the coordinates match**.
+Implement the `create_record()` function that takes a `(treasure, coordinate)` pair from Azara's list and a `(location, coordinate, quadrant)` record from Rui's list and returns `(treasure, coordinate, location, coordinate, quadrant)` **if the coordinates match**.
If the coordinates _do not_ match, return the string **"not a match"**
Re-format the coordinate as needed for accurate comparison.
Allow nic-config conversion without Heat
The current script requires the orchestration service (Heat) to
be available. This change will allow the script to convert
existing templates provided without the orchestration
service present.
@@ -82,6 +82,13 @@ def parse_opts(argv):
parser.add_argument('template',
metavar='TEMPLATE_FILE',
help='Existing NIC config template to convert.')
+ parser.add_argument('--standalone',
+ default=False,
+ action='store_true',
+ help='This switch allows the script to operate in '
+ 'environments where the orchestration service '
+ 'is not available, such as environments with '
+ 'ephemeral-heat')
opts = parser.parse_args(argv[1:])
@@ -225,7 +232,8 @@ class ConvertToAnsibleJ2(object):
if isinstance(param, str):
if param in self.param_to_var_map:
return self.param_to_var_map[param]
- elif param in self.stack_env.get('parameter_defaults', {}):
+ elif (self.stack_env and
+ param in self.stack_env.get('parameter_defaults', {})):
stack_value = self.stack_env['parameter_defaults'][param]
print('INFO - Custom Parameter {} was hard-coded in the '
'converted template using the value from the Heat stack '
@@ -389,7 +397,7 @@ class ConvertToAnsibleJ2(object):
net_config_res_props = net_config_res['properties']
if net_config_res['type'] == 'OS::Heat::Value':
- h_net_conf = net_config_res_props['value']
+ h_net_conf = net_config_res_props['value']['network_config']
elif net_config_res['type'] == 'OS::Heat::SoftwareConfig':
h_net_conf = net_config_res_props['config']['str_replace'][
'params']['$network_config']['network_config']
@@ -501,7 +509,10 @@ def main():
j2_template = os.path.splitext(template)[0] + '.j2'
validate_files(opts, template, networks_file, j2_template)
+ if not opts.standalone:
stack_env = get_stack_environment(opts.stack)
+ else:
+ stack_env = None
converter = ConvertToAnsibleJ2(stack_env, networks_file)
Fix Sphinx crossrefs to 'Client'.
Broken by move to 'spanner_v1' (the aliases in 'spanner' are not honored).
Closes
@@ -42,23 +42,23 @@ Spanner Client
Instantiating a Client
----------------------
-To use the API, the :class:`~google.cloud.spanner.client.Client`
+To use the API, the :class:`~google.cloud.spanner_v1.client.Client`
class defines a high-level interface which handles authorization
and creating other objects:
.. code:: python
- from google.cloud import spanner
- client = spanner.Client()
+ from google.cloud import spanner_v1
+ client = spanner_v1.Client()
Long-lived Defaults
-------------------
-When creating a :class:`~google.cloud.spanner.client.Client`, the
+When creating a :class:`~google.cloud.spanner_v1.client.Client`, the
``user_agent`` and ``timeout_seconds`` arguments have sensible
defaults
-(:data:`~google.cloud.spanner.client.DEFAULT_USER_AGENT` and
-:data:`~google.cloud.spanner.client.DEFAULT_TIMEOUT_SECONDS`).
+(:data:`~google.cloud.spanner_v1.client.DEFAULT_USER_AGENT` and
+:data:`~google.cloud.spanner_v1.client.DEFAULT_TIMEOUT_SECONDS`).
However, you may over-ride them and these will be used throughout all API
requests made with the ``client`` you create.
@@ -74,22 +74,22 @@ Configuration
Engine or Google Compute Engine the project will be detected automatically.
(Setting this environment variable is not required, you may instead pass the
``project`` explicitly when constructing a
- :class:`~google.cloud.spanner.client.Client`).
+ :class:`~google.cloud.spanner_v1.client.Client`).
- After configuring your environment, create a
- :class:`~google.cloud.spanner.client.Client`
+ :class:`~google.cloud.spanner_v1.client.Client`
.. code::
- >>> from google.cloud import spanner
- >>> client = spanner.Client()
+ >>> from google.cloud import spanner_v1
+ >>> client = spanner_v1.Client()
or pass in ``credentials`` and ``project`` explicitly
.. code::
- >>> from google.cloud import spanner
- >>> client = spanner.Client(project='my-project', credentials=creds)
+ >>> from google.cloud import spanner_v1
+ >>> client = spanner_v1.Client(project='my-project', credentials=creds)
.. tip::
@@ -106,8 +106,8 @@ Warnings about Multiprocessing
Next Step
---------
-After a :class:`~google.cloud.spanner.client.Client`, the next
-highest-level object is an :class:`~google.cloud.spanner.instance.Instance`.
+After a :class:`~google.cloud.spanner_v1.client.Client`, the next
+highest-level object is an :class:`~google.cloud.spanner_v1.instance.Instance`.
You'll need one before you can interact with databases.
Next, learn about the :doc:`instance-usage`.
[TVMC] Keep quantized weights when importing PyTorch model
BYOC requires `keep_quantized_weight` to be set to True when converting
PyTorch models using `from_pytorch`. This sets it to True when using
TVMC.
@@ -262,7 +262,9 @@ class PyTorchFrontend(Frontend):
input_shapes = list(shape_dict.items())
logger.debug("parse Torch model and convert into Relay computation graph")
- return relay.frontend.from_pytorch(traced_model, input_shapes, **kwargs)
+ return relay.frontend.from_pytorch(
+ traced_model, input_shapes, keep_quantized_weight=True, **kwargs
+ )
class PaddleFrontend(Frontend):
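A plausible direct-relay equivalent of what TVMC now does (the model choice is illustrative):

    import torch
    import torchvision
    from tvm import relay

    model = torchvision.models.quantization.resnet18(pretrained=True, quantize=True).eval()
    inp = torch.rand(1, 3, 224, 224)
    traced_model = torch.jit.trace(model, inp)
    mod, params = relay.frontend.from_pytorch(
        traced_model, [("input", inp.shape)], keep_quantized_weight=True
    )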
Release: Make sure to check with pip locally before uploading to PyPI
* This will avoid breakage like the recent issue with runners being wrongly
handled by pip.
* Only a very basic test is done with the pip-installed Nuitka.
from __future__ import print_function
import os
+import sys
+import shutil
from nuitka.tools.release.Documentation import createReleaseDocumentation
from nuitka.tools.release.Release import checkBranchName
@@ -53,31 +55,31 @@ def main():
contents = open("README.rst", "rb").read()
assert b".. contents" not in contents
+ shutil.rmtree("check_nuitka", ignore_errors = True)
+ shutil.rmtree("dist", ignore_errors = True)
+
print("Creating documentation.")
createReleaseDocumentation()
print("Creating source distribution.")
assert os.system("python setup.py sdist") == 0
+
+ print("Creating virtualenv for quick test:")
+ assert os.system("virtualenv check_nuitka") == 0
+
+ print("Installing Nuitka into virtualenv:")
+ print("*" * 40)
+ assert os.system("cd check_nuitka; . bin/activate; pip install ../dist/Nuitka*.tar.gz") == 0
+ print("*" * 40)
+
+ print("Compiling basic test:")
+ print("*" * 40)
+ assert os.system("cd check_nuitka; . bin/activate; nuitka-run ../tests/basics/Asserts.py") == 0
+ print("*" * 40)
+
+ if "check" not in sys.argv:
+ assert False
print("Uploading source dist")
assert os.system("twine upload dist/*") == 0
print("Uploaded.")
-
- # TODO: This won't work yet.
- # import time
- # import xmlrpclib
- # if False:
- # for _i in range(60):
- # # Wait some time for PyPI to catch up with us. Without delay
- # # the old version will still appear. Since this is running
- # # in a Buildbot, we need not be optimal.
- # time.sleep(5*60)
- #
- # pypi = xmlrpclib.ServerProxy("https://pypi.python.org/pypi")
- # pypi_versions = pypi.package_releases("Nuitka")
- #
- # assert len(pypi_versions) == 1, pypi_versions
- # if nuitka_version == pypi_versions[0]:
- # break
- #
- # print("Version check failed:", nuitka_version, pypi_versions)
- #
- # print("Uploaded OK:", pypi_versions[0])
+ else:
+ print("Checked OK, not uploaded.")
Make the start and stop strings that mark hidden tests configurable
TODO: Find out why nbgrader quickstart does not put them into the configuration file?
@@ -3,13 +3,39 @@ import re
from .. import utils
from . import NbGraderPreprocessor
+from traitlets import Unicode
+from textwrap import dedent
class RemoveHidden(NbGraderPreprocessor):
+ hidestart = Unicode(
+ '### HIDESTART',
+ config=True,
+ help=dedent(
+ """
+ Suppose you want to hide some test cases from your students in a cell.
+ Place this string before those test cases and the corresponding string
+ hideend after them.
+ """
+ )
+ )
+
+ hideend = Unicode(
+ '### HIDEEND',
+ config=True,
+ help=dedent(
+ """
+ Suppose you want to hide some test cases from your students in a cell.
+ Place this string after those tests.
+ """
+ )
+ )
def preprocess_cell(self, cell, resources, cell_index):
if utils.is_grade(cell) or utils.is_solution(cell) or utils.is_locked(cell):
- cell.source = re.sub('START(?:.|\n)*?STOP', '', cell.source)
+ cell.source = re.sub('{}(?:.|\n)*?{}'.format(self.hidestart,
+ self.hideend)
+ , '', cell.source)
# we probably don't really need this?
cell.metadata.nbgrader['oldchecksum'] = cell.metadata.nbgrader['checksum']
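The substitution in isolation, with the default markers:

    import re

    source = (
        "assert add(1, 2) == 3\n"
        "### HIDESTART\n"
        "assert add(-1, 1) == 0  # hidden from students\n"
        "### HIDEEND\n"
    )
    cleaned = re.sub('### HIDESTART(?:.|\n)*?### HIDEEND', '', source)
    print(cleaned)   # only the visible assertion survives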