Dataset columns:
content: string, lengths 27 to 928k
path: string, lengths 4 to 230
size: int64, 27 to 928k
nl_text: string, lengths 21 to 396k
nl_size: int64, 21 to 396k
nl_language: string, lengths 2 to 3
nl_language_score: float64, 0.04 to 1

content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cv2
import numpy as np
from math import cos, sin, pi
from tqdm import tqdm
import open3d as o3d


def render(pointcloud_file_path, estimate_normals_radius, estimate_normals_max_nn):
    pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
    pointcloud.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(
            radius=estimate_normals_radius, max_nn=estimate_normals_max_nn))
    o3d.visualization.draw_geometries([pointcloud])
    return True


class Renderer(object):
    def __init__(self):
        self.vis = o3d.visualization.Visualizer()
        self.render_center = None
        self.euler_angle = [0, 0, 0]
        return

    def getRotationMatrixFromEulerAngle(self, euler_angle):
        R_x = np.array([
            [1, 0, 0],
            [0, cos(euler_angle[0]), -sin(euler_angle[0])],
            [0, sin(euler_angle[0]), cos(euler_angle[0])]
        ])
        R_y = np.array([
            [cos(euler_angle[1]), 0, sin(euler_angle[1])],
            [0, 1, 0],
            [-sin(euler_angle[1]), 0, cos(euler_angle[1])]
        ])
        R_z = np.array([
            [cos(euler_angle[2]), -sin(euler_angle[2]), 0],
            [sin(euler_angle[2]), cos(euler_angle[2]), 0],
            [0, 0, 1]
        ])
        rotation_matrix = np.dot(R_z, np.dot(R_y, R_x))
        return rotation_matrix

    def getRotateDirection(self, direction_vector, euler_angle):
        np_direction_vector = np.array(direction_vector)
        direction_vector_norm = np.linalg.norm(np_direction_vector)
        if direction_vector_norm == 0:
            print("[ERROR][Renderer::getRotateDirection]")
            print("\t direction_vector_norm is 0!")
            return None
        np_unit_direction_vector = np_direction_vector / direction_vector_norm
        rotation_matrix = self.getRotationMatrixFromEulerAngle(euler_angle)
        rotate_direction = np.dot(rotation_matrix, np_unit_direction_vector)
        return rotate_direction.tolist()

    def rotateVis(self, delta_rotate_angle):
        self.euler_angle[0] = 0
        self.euler_angle[1] = -10 * pi / 180.0
        self.euler_angle[2] += delta_rotate_angle * pi / 180.0
        ctr = self.vis.get_view_control()
        front_direction = self.getRotateDirection(
            [1, 0, 0], self.euler_angle)
        ctr.set_front(front_direction)
        up_direction = self.getRotateDirection(
            [0, 0, 1], self.euler_angle)
        ctr.set_up(up_direction)
        ctr.set_lookat(self.render_center)
        # ctr.set_zoom(0.5)
        return True

    def render(self, show_labels, scene_pointcloud_file_path=None):
        delta_rotate_angle = 0.5

        if scene_pointcloud_file_path is not None:
            print("start reading floor and wall...")
            self.splitLabeledPoints(scene_pointcloud_file_path)

        rendered_pointcloud = o3d.geometry.PointCloud()
        render_points = []
        render_colors = []

        print("start create rendered pointcloud...")
        for i in tqdm(range(len(self.pointcloud_list))):
            points = np.asarray(self.pointcloud_list[i].points).tolist()
            if len(points) == 0:
                continue
            for point in points:
                render_points.append(point)
                render_colors.append(self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0)

        if scene_pointcloud_file_path is not None:
            print("start create rendered floor...")
            for wall_point in tqdm(self.labeled_point_cluster_list[0]):
                if abs(wall_point[2]) > 0.01:
                    continue
                render_points.append(wall_point)
                render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)

        rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
        rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))
        rendered_pointcloud.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))

        self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()

        self.vis.create_window(window_name="Open3D RenderObject")
        render_option = self.vis.get_render_option()
        render_option.background_color = np.array([1, 1, 1])
        render_option.point_size = 1
        self.vis.add_geometry(rendered_pointcloud)

        while True:
            self.rotateVis(delta_rotate_angle)
            # self.vis.update_geometry()
            self.vis.poll_events()
            self.vis.update_renderer()
            if ord('q') == cv2.waitKey(1):
                break
        self.vis.destroy_window()
        return True

    def saveRender(self, output_video_file_path):
        fps = 30
        video_width = 1920
        video_height = 1080
        delta_rotate_angle = 0.5

        if scene_pointcloud_file_path is not None:
            print("start reading floor and wall...")
            self.splitLabeledPoints(scene_pointcloud_file_path)

        rendered_pointcloud = o3d.geometry.PointCloud()
        render_points = []
        render_colors = []

        print("start create rendered pointcloud...")
        for i in tqdm(range(len(self.pointcloud_list))):
            points = np.asarray(self.pointcloud_list[i].points).tolist()
            if len(points) == 0:
                continue
            for point in points:
                render_points.append(point)
                render_colors.append(self.d3_40_colors_rgb[i % len(self.d3_40_colors_rgb)] / 255.0)

        if scene_pointcloud_file_path is not None:
            print("start create rendered floor...")
            for wall_point in tqdm(self.labeled_point_cluster_list[0]):
                if abs(wall_point[2]) > 0.01:
                    continue
                render_points.append(wall_point)
                render_colors.append(np.array([132, 133, 135], dtype=np.uint8) / 255.0)

        rendered_pointcloud.points = o3d.utility.Vector3dVector(np.array(render_points))
        rendered_pointcloud.colors = o3d.utility.Vector3dVector(np.array(render_colors))

        self.render_center = rendered_pointcloud.get_axis_aligned_bounding_box().get_center()

        self.vis.create_window(window_name="Open3D RenderObject")
        render_option = self.vis.get_render_option()
        render_option.background_color = np.array([1, 1, 1])
        render_option.point_size = 1
        self.vis.add_geometry(rendered_pointcloud)

        fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        out = cv2.VideoWriter(output_video_file_path, fourcc, fps, (video_width, video_height))
        for i in range(int(360 / delta_rotate_angle)):
            self.rotateVis(0.5)
            # self.vis.update_geometry()
            self.vis.poll_events()
            self.vis.update_renderer()
            open3d_image = np.asarray(self.vis.capture_screen_float_buffer()) * 255.0
            cv_image = cv2.cvtColor(open3d_image, cv2.COLOR_RGB2BGR).astype(np.uint8)
            out.write(cv_image)
        self.vis.destroy_window()
        out.release()
        return True
path: PointCloudClass/renderer.py
size: 7,244
nl_text: !/usr/bin/env python -*- coding: utf-8 -*- ctr.set_zoom(0.5) self.vis.update_geometry() self.vis.update_geometry()
nl_size: 117
nl_language: es
nl_language_score: 0.093506

content:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Test for parse_ebi.py.
Run "python3 parse_ebi_test.py"
'''
import copy
import unittest
import parse_ebi

CONST_TEST_TEXT = '''[Term]
id: MI:0001
name: interaction detection method
def: "Method to determine the interaction." [PMID:14755292]

[Term]
id: MI:0045
name: experimental interaction detection
def: "Methods based" [PMID:14755292]
is_a: MI:0001 ! interaction detection method

[Term]
id: MI:0401
name: biochemical
def: "The application" [PMID:14755292]
is_a: MI:0045 ! experimental interaction detection

[Term]
id: MI:0091
name: chromatography technology
def: "Used to separate" [PMID:14755292]
is_a: MI:0401 ! biochemical'''

CONST_ID_TO_CLASS_NAME = {'MI:0001': 'InteractionDetectionMethod',
                          'MI:0091': 'ChromatographyTechnology',
                          'MI:0045': 'ExperimentalInteractionDetection',
                          'MI:0401': 'Biochemical'}
CONST_ID_TO_NODE = {}
CONST_ID_TO_NODE_NO_RELATION = {}
for key in ['MI:0001', 'MI:0045', 'MI:0401', 'MI:0091']:
    CONST_ID_TO_NODE[key] = parse_ebi.Node(key)
    CONST_ID_TO_NODE_NO_RELATION[key] = parse_ebi.Node(key)

CONST_ID_TO_NODE['MI:0001'].child_list.append(CONST_ID_TO_NODE['MI:0045'])
CONST_ID_TO_NODE['MI:0045'].parent_list.append(CONST_ID_TO_NODE['MI:0001'])
CONST_ID_TO_NODE['MI:0045'].child_list.append(CONST_ID_TO_NODE['MI:0401'])
CONST_ID_TO_NODE['MI:0401'].parent_list.append(CONST_ID_TO_NODE['MI:0045'])
CONST_ID_TO_NODE['MI:0401'].child_list.append(CONST_ID_TO_NODE['MI:0091'])
CONST_ID_TO_NODE['MI:0091'].parent_list.append(CONST_ID_TO_NODE['MI:0401'])

CONST_SCHEMA1 = '''Node: dcid:ExperimentalInteractionDetection
typeOf: dcs:InteractionTypeEnum
name: "ExperimentalInteractionDetection"
psimiID: "MI:0045"
description: "Methods base"
pubMedID: "14755292"
descriptionUrl: "http://psidev.info/groups/controlled-vocabularies"'''

CONST_SCHEMA2 = '''Node: dcid:Biochemical
typeOf: dcs:InteractionTypeEnum
name: "Biochemical"
psimiID: "MI:0401"
description: "The applicatio"
pubMedID: "14755292"
specializationOf: dcs:ExperimentalInteractionDetection
descriptionUrl: "http://psidev.info/groups/controlled-vocabularies"'''


def get_file_terms(file):
    "Ruturns a list of text blocks."
    file_terms = file.split('\n\n')
    file_terms = [term_text.split('\n') for term_text in file_terms
                  if term_text.startswith('[Term]')]
    return file_terms


CONST_FILE_TERMS = get_file_terms(CONST_TEST_TEXT)
CONST_INTERACTION_TYPE_ID_SET = set(['MI:0045', 'MI:0091', 'MI:0401'])


class TestParseEbi(unittest.TestCase):
    """Test the functions in parse_ebi.py"""

    def test_get_id_maps(self):
        """Test function get_id_maps. Note that id_to_node here doesn't have
        parent_child relation, so only map keys are tested."""
        id_to_class_name, id_to_node = parse_ebi.get_id_maps(CONST_FILE_TERMS)
        self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
        self.assertEqual(id_to_node.keys(), CONST_ID_TO_NODE_NO_RELATION.keys())

    def test_build_child_parent_link(self):
        """Test function build_child_parent_link by checking the values of
        child_list and parent_list."""
        id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
        id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)

        def get_node_value_set(node_list):
            value_set = set()
            for node in node_list:
                value_set.add(node.value)
            return value_set

        for id_key in id_to_node:
            parent_value_set = get_node_value_set(id_to_node[id_key].parent_list)
            const_parent_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].parent_list)
            child_value_set = get_node_value_set(id_to_node[id_key].child_list)
            const_child_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].child_list)
            self.assertEqual(parent_value_set, const_parent_value_set)
            self.assertEqual(child_value_set, const_child_value_set)

    def test_TreeBuilder(self):
        """Test TreeBuilder class."""
        dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
        INTERACTION_TYPE_ROOT = 'MI:0001'
        interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
        self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET)

    def test_get_schema_from_text(self):
        """Test function get_schema_from_text by comparing the final schema."""
        new_source_map = {'references':{}}
        term = CONST_FILE_TERMS[1]
        schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map,
                                                    CONST_ID_TO_CLASS_NAME,
                                                    CONST_INTERACTION_TYPE_ID_SET, set(), set())
        self.assertEqual(schema_res[0], CONST_SCHEMA1)

        term = CONST_FILE_TERMS[2]
        schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map,
                                                    CONST_ID_TO_CLASS_NAME,
                                                    CONST_INTERACTION_TYPE_ID_SET, set(), set())
        self.assertEqual(schema_res[0], CONST_SCHEMA2)


if __name__ == '__main__':
    unittest.main()
path: scripts/proteinInteractionEBI/parse_ebi_test.py
size: 5,814
nl_text: Test the functions in parse_ebi.py Ruturns a list of text blocks. Test TreeBuilder class. Test function build_child_parent_link by checking the values of child_list and parent_list. Test function get_id_maps. Note that id_to_node here doesn't have parent_child relation, so only map keys are tested. Test function get_schema_from_text by comparing the final schema. Test for parse_ebi.py. Run "python3 parse_ebi_test.py" Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
nl_size: 971
nl_language: en
nl_language_score: 0.787593

content:
import os
import pathlib

from dotenv import load_dotenv, find_dotenv
from fpdf import FPDF

#envelope size: 110 by 145 mm

# Elliot Torres
# 4321 Loser Road
# La Crescenta, CA 91214
#
# Ryan Lee
# 1234 Boomer Road
# La Crescenta, CA 91214

load_dotenv(find_dotenv())

# types out address on envelope
def sendmail(
    FULLNAME,
    ADDRESS_LINE_ONE,
    CITY,
    STATE,
    ZIPCODE,
    PERSON=None,
    ADDRESS_LINE_TWO=None
):
    if PERSON is None:
        sender_name = os.environ['sender_name']
        sender_addr1 = os.environ['sender_addr1']
        sender_addr2 = os.environ['sender_addr2']
    else:
        sender_name = PERSON.fullname
        sender_addr1 = f'{PERSON.addrline1}'
        sender_addr2 = f'{PERSON.city}, {PERSON.state} {PERSON.zipcode}'

    pdf = FPDF('L', 'mm', (110, 145))
    pdf.add_page()
    pdf.set_font('Times', '', 9.8)
    pdf.set_margins(0, 0, 0)
    pdf.text(7, 7.5, sender_name)
    pdf.text(7, 10.5, sender_addr1)
    pdf.text(7, 13.5, sender_addr2)
    pdf.set_font('Times', '', 14)

    if ADDRESS_LINE_TWO is None:
        pdf.text(44, 78, FULLNAME)
        pdf.text(44, 82, ADDRESS_LINE_ONE)
        pdf.text(44, 86, f'{CITY}, {STATE} {ZIPCODE}')
    else:
        pdf.text(44, 78, FULLNAME)
        pdf.text(44, 82, f'{ADDRESS_LINE_ONE}, {ADDRESS_LINE_TWO}')
        pdf.text(44, 86, f'{CITY}, {STATE} {ZIPCODE}')

    # types out message on back fo envelope
    pdf.add_page()
    pdf.set_margins(0, 0, 0)
    pdf.text(10, 78, f"Happy Birthday {FULLNAME}!")
    pdf.text(10, 82, "Have a wonderful day and enjoy your sweet!")
    pdf.text(10, 86, "-CVHS Bday Team")

    envelope_file = pathlib.Path('envelope.pdf')
    if envelope_file.exists():
        envelope_file.unlink()

    pdf.output('envelope.pdf', dest='F').encode('latin-1')
    os.system("lp -d printer envelope.pdf")
path: bdaybot/snailmail.py
size: 1,836
nl_text: envelope size: 110 by 145 mm Elliot Torres 4321 Loser Road La Crescenta, CA 91214 Ryan Lee 1234 Boomer Road La Crescenta, CA 91214 types out address on envelope types out message on back fo envelope
nl_size: 198
nl_language: en
nl_language_score: 0.638595

content:
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class BST:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    # Average: O(log(n)) time | O(1) space
    # Worst: O(n) time | O(1) space
    def insert(self, val):
        currentNode = self
        while True:
            if val < currentNode.val:
                if currentNode.left is None:
                    currentNode.left = BST(val)
                    break
                else:
                    currentNode = currentNode.left
            else:
                if currentNode.right is None:
                    currentNode.right = BST(val)
                    break
                else:
                    currentNode = currentNode.right
        return self


import sys

class Solution(object):
    def isValidBST(self, root):
        """
        :type root: TreeNode
        :rtype: bool
        """
        MAX = sys.maxint
        MIN = -sys.maxint - 1
        return self.isValidBSTHelper(root, MIN, MAX)

    def isValidBSTHelper(self, root, minValue, maxValue):
        if root is None:
            return True
        if root.left is None and root.right is None:
            return minValue < root.val < maxValue
        if root.val <= minValue or root.val >= maxValue:
            return False
        leftSubtreeIsValid = self.isValidBSTHelper(root.left, minValue, root.val)
        rightSubtreeIsValid = self.isValidBSTHelper(root.right, root.val, maxValue)
        return leftSubtreeIsValid and rightSubtreeIsValid


# driver/test code
# test_tree = BST(100).insert(5).insert(15).insert(5).insert(2).insert(1).insert(22) \
#     .insert(1).insert(1).insert(3).insert(1).insert(1).insert(502).insert(55000) \
#     .insert(204).insert(205).insert(207).insert(206).insert(208).insert(203) \
#     .insert(-51).insert(-403).insert(1001).insert(57).insert(60).insert(4500)
test_tree = BST(2).insert(1).insert(4).insert(None).insert(None).insert(3).insert(6)
sol = Solution()
is_valid_bst = sol.isValidBST(test_tree)
print("Is BST valid ? - ", is_valid_bst)
path: leetcode.com/python/98_Validate_Binary_Search_Tree.py
size: 2,208
nl_text: :type root: TreeNode :rtype: bool Definition for a binary tree node. class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None Average: O(log(n)) time | O(1) space Worst: O(n) time | O(1) space driver/test code test_tree = BST(100).insert(5).insert(15).insert(5).insert(2).insert(1).insert(22) \ .insert(1).insert(1).insert(3).insert(1).insert(1).insert(502).insert(55000) \ .insert(204).insert(205).insert(207).insert(206).insert(208).insert(203) \ .insert(-51).insert(-403).insert(1001).insert(57).insert(60).insert(4500)
nl_size: 602
nl_language: zh
nl_language_score: 0.099228

content:
from enum import Enum
from typing import List, Optional, Type, Union

import click

from ..types import NotSet


class CSVOption(click.Choice):
    def __init__(self, choices: Type[Enum]):
        self.enum = choices
        super().__init__(tuple(choices.__members__))

    def convert(
        self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]
    ) -> List[Enum]:
        items = [item for item in value.split(",") if item]
        invalid_options = set(items) - set(self.choices)
        if not invalid_options and items:
            return [self.enum[item] for item in items]
        # Sort to keep the error output consistent with the passed values
        sorted_options = ", ".join(sorted(invalid_options, key=items.index))
        available_options = ", ".join(self.choices)
        self.fail(f"invalid choice(s): {sorted_options}. Choose from {available_options}")


not_set = NotSet()


class OptionalInt(click.types.IntRange):
    def convert(  # type: ignore
        self, value: str, param: Optional[click.core.Parameter], ctx: Optional[click.core.Context]
    ) -> Union[int, NotSet]:
        if value == "None":
            return not_set
        try:
            int(value)
            return super().convert(value, param, ctx)
        except ValueError:
            self.fail("%s is not a valid integer or None" % value, param, ctx)
path: src/schemathesis/cli/options.py
size: 1,392
nl_text: Sort to keep the error output consistent with the passed values type: ignore
nl_size: 76
nl_language: en
nl_language_score: 0.618907

content:
import nltk
# nltk.download('stopwords') #if doesnt work download all these first
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize

stop_words = set(stopwords.words('english'))

meaning_with_example = {
    "CC" : "coordinating conjunction",
    "CD" : "cardinal digit",
    "DT" : "determiner",
    "EX" : "existential there (like: “there is” … think of it like “there exists”)",
    "FW" : "foreign word",
    "IN" : "preposition/subordinating conjunction",
    "JJ" : "adjective ‘big’",
    "JJR": "adjective, comparative ‘bigger’",
    "JJS": "adjective, superlative ‘biggest’",
    "LS" : "list marker 1)",
    "MD" : "modal could, will",
    "NN" : "noun, singular ‘desk’",
    "NNS": "noun plural ‘desks’",
    "NNP": "proper noun, singular ‘Harrison’",
    "NNPS": "proper noun, plural ‘Americans’",
    "PDT": "predeterminer ‘all the kids’",
    "POS": "possessive ending parent‘s",
    "PRP": "personal pronoun I, he, she",
    "PRP$": "possessive pronoun my, his, hers",
    "RB" : "adverb very, silently,",
    "RBR": "adverb, comparative better",
    "RBS": "adverb, superlative best",
    "RP" : "particle give up",
    "TO" : "to go ‘to‘ the store.",
    "UH" : "interjection errrrrrrrm",
    "VB" : "verb, base form take",
    "VBD": "verb, past tense took",
    "VBG": "verb, gerund/present participle taking",
    "VBN": "verb, past participle taken",
    "VBP": "verb, sing. present, non-3d take",
    "VBZ": "verb, 3rd person sing. present takes",
    "WDT": "wh-determiner which",
    "WP" : "wh-pronoun who, what",
    "WP$": "possessive wh-pronoun whose",
    "WRB": "wh-abverb where, when",
    "," : "comma",
    "." : "full stop"
}

meaning = {
    "CC" : "coordinating conjunction",
    "CD" : "cardinal digit",
    "DT" : "determiner",
    "EX" : "existential there",
    "FW" : "foreign word",
    "IN" : "preposition/subordinating conjunction",
    "JJ" : "adjective",
    "JJR": "adjective, comparative",
    "JJS": "adjective, superlative",
    "LS" : "list marker",
    "MD" : "modal could, will",
    "NN" : "noun singular",
    "NNS": "noun plural",
    "NNP": "proper noun, singular",
    "NNPS": "proper noun, plural",
    "PDT": "predeterminer",
    "POS": "possessive ending",
    "PRP": "personal pronoun",
    "PRP$": "possessive pronoun",
    "RB" : "adverb ",
    "RBR": "adverb, comparative ",
    "RBS": "adverb, superlative ",
    "RP" : "particle ",
    "TO" : "to go ‘to‘ the store.",
    "UH" : "interjection",
    "VB" : "verb base form ",
    "VBD": "verb past tense ",
    "VBG": "verb gerund/present participle",
    "VBN": "verb past participle ",
    "VBP": "verb sing. present",
    "VBZ": "verb 3rd person sing. present ",
    "WDT": "wh-determiner which",
    "WP" : "wh-pronoun who, what",
    "WP$": "possessive wh-pronoun whose",
    "WRB": "wh-abverb where, when"
}

def get_part_of_speech(sentence):
    cleaned=[]
    tokenized = sent_tokenize(sentence)
    for i in tokenized:
        wordsList = nltk.word_tokenize(i)
        wordsList = [w for w in wordsList if not w in stop_words]
        tagged = nltk.pos_tag(wordsList)
        for pair in tagged:
            c_pair=[]
            c_pair.append(pair[0])
            try :
                c_pair.append(meaning[pair[1]])
            except :
                c_pair.append("Punctuation")
            cleaned.append(c_pair)
    return cleaned

#print(get_part_of_speech("Sukanya, Rajib and Naba are my good friends."))
path: part_of_speech.py
size: 3,323
nl_text: nltk.download('stopwords') if doesnt work download all these first nltk.download('punkt') nltk.download('averaged_perceptron_tagger')print(get_part_of_speech("Sukanya, Rajib and Naba are my good friends."))
nl_size: 207
nl_language: en
nl_language_score: 0.658869

content:
from __future__ import print_function

import sys

from metapub import PubMedFetcher
from metapub import FindIt

# examples of different formats:
# 18612690: PubMedArticle with multiple AbstractText sections
# 1234567: PubMedArticle with no abstract whatsoever
# 20301546: PubMedBookArticle from GeneReviews

####
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.WARNING)
ch = logging.StreamHandler()
logging.getLogger("metapub").setLevel(logging.INFO)
logging.getLogger("metapub").addHandler(ch)
####

try:
    pmid = sys.argv[1]
except IndexError:
    print('Supply a pubmed ID as the argument to this script.')
    print('')
    print('Example: python demo_pubmed.py 123456')
    sys.exit()

article = PubMedFetcher().article_by_pmid(pmid)

print('')
print(article.pmid, article.title)
print('')
print('authors: %s' % ','.join(article.authors))
print('journal: %s' % article.journal)
print('')
excerpt = '(empty)' if article.abstract is None else article.abstract[:100] + '[...]'
print('abstract: %s' % excerpt)
print('')
print('pii:',str(article.pii))
print('doi:',str(article.doi))
print('pmc:',str(article.pmc))
print('volume:',str(article.volume))
print('issue:',str(article.issue))
print('pages:',str(article.pages))
print('year:',str(article.year))
print('')
print('MeSH headings: ')
for DUI in list(article.mesh.keys()):
    print('\t', DUI, article.mesh[DUI]['descriptor_name'], article.mesh.get('qualifier_name', ''))

if article.publication_types:
    print('\nPublication Type Information')
    for pt in list(article.publication_types.keys()):
        print('\t', pt, article.publication_types[pt])

if article.chemicals:
    print('\nChemical List')
    for DUI in list(article.chemicals.keys()):
        print('\t', DUI, article.chemicals[DUI]['substance_name'])

if article.grants:
    print('\nGrant Information')
    for gr in grants:
        print('\t', gr)

if article.history:
    print('\nArticle History')
    for hist in article.history:
        print('\t', hist, article.history[hist])

print('')
print('FindIt results:')
source = FindIt(pmid=pmid)
print('\tdoi:', source.doi)
print('\turl:', source.url)
print('\tbackup:', source.backup_url)
print('\treason:', source.reason)

print(article.citation_html)
bin/demo_get_PubMedArticle_by_pmid.py
2,304
examples of different formats: 18612690: PubMedArticle with multiple AbstractText sections 1234567: PubMedArticle with no abstract whatsoever 20301546: PubMedBookArticle from GeneReviews
187
en
0.569475
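A hedged sketch of driving the same metapub calls programmatically instead of via sys.argv, using the demo PMIDs listed in the script's own comments; network access to PubMed is assumed.

# Illustrative only: loops over the demo PMIDs from the script's comments.
from metapub import PubMedFetcher, FindIt

fetcher = PubMedFetcher()
for pmid in ('18612690', '1234567', '20301546'):
    article = fetcher.article_by_pmid(pmid)
    print(pmid, article.title)
    src = FindIt(pmid=pmid)
    # url is None when no full-text source could be resolved; reason says why.
    print('  full text:', src.url or 'not found ({})'.format(src.reason))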
from skimage.measure import find_contours
from skimage import io
from skimage.color import rgb2gray
from matplotlib import pyplot as plt

image = io.imread('contour_finding_test.png')
# image = io.imread('FlowchartDiagram.png')
image = rgb2gray(image)

out = find_contours(image)
print(len(out))

# Find contours at a constant value of 0.8
# contours = find_contours(image, 0.8)
# With no level given, recent scikit-image versions default to the image's mid-value.
contours = find_contours(image)

# Display the image and plot all contours found
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)

for contour in contours:
    ax.plot(contour[:, 1], contour[:, 0], linewidth=2)

ax.axis('image')
ax.set_xticks([])
ax.set_yticks([])
plt.show()

# io.imshow(image)
# io.show()
pyimage/contour.py
698
image = io.imread('FlowchartDiagram.png') Find contours at a constant value of 0.8 contours = find_contours(image, 0.8) Display the image and plot all contours found io.imshow(image) io.show()
192
en
0.572217
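For reference, a self-contained sketch of find_contours with the explicit 0.8 level from the commented-out line, run on a synthetic image so none of the PNG files the script expects are needed.

# Synthetic example: contour a smooth bump at level 0.8 (no input files needed).
import numpy as np
from skimage.measure import find_contours

y, x = np.mgrid[-1:1:200j, -1:1:200j]
image = np.exp(-(x**2 + y**2))          # values in (0, 1], peak at the centre

contours = find_contours(image, 0.8)    # explicit iso-value, as in the commented line
print(len(contours), 'contour(s) found')
for c in contours:
    print('first point (row, col):', c[0])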
import numpy as np from numpy import (reciprocal, einsum, maximum, minimum, zeros_like, atleast_1d, squeeze) from scipy.linalg import eig, eigvals, matrix_balance, norm from harold._classes import Transfer, transfer_to_state from harold._discrete_funcs import discretize from harold._arg_utils import _check_for_state, _check_for_state_or_transfer __all__ = ['simulate_linear_system', 'simulate_step_response', 'simulate_impulse_response'] def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False): """ Compute the linear model response to an input array sampled at given time instances. Parameters ---------- sys : {State, Transfer} The system model to be simulated u : array_like The real-valued input sequence to force the model. 1D arrays for single input models and 2D arrays that has as many columns as the number of inputs are valid inputs. t : array_like, optional The real-valued sequence to be used for the evolution of the system. The values should be equally spaced otherwise an error is raised. For discrete time models increments different than the sampling period also raises an error. On the other hand for discrete models this can be omitted and a time sequence will be generated automatically. x0 : array_like, optional The initial condition array. If omitted an array of zeros is assumed. Note that Transfer models by definition assume zero initial conditions and will raise an error. per_channel : bool, optional If this is set to True and if the system has multiple inputs, the response of each input is returned individually. For example, if a system has 4 inputs and 3 outputs then the response shape becomes (num, p, m) instead of (num, p) where k-th slice (:, :, k) is the response from the k-th input channel. For single input systems, this keyword has no effect. Returns ------- yout : ndarray The resulting response array. The array is 1D if sys is SISO and has p columns if sys has p outputs. tout : ndarray The time sequence used in the simulation. If the parameter t is not None then a copy of t is given. Notes ----- For Transfer models, first conversion to a state model is performed and then the resulting model is used for computations. 
""" _check_for_state_or_transfer(sys) # Quick initial condition checks if x0 is not None: if sys._isgain: raise ValueError('Static system models can\'t have initial ' 'conditions set.') if isinstance(sys, Transfer): raise ValueError('Transfer models can\'t have initial conditions ' 'set.') x0 = np.asarray(x0, dtype=float).squeeze() if x0.ndim > 1: raise ValueError('Initial condition can only be a 1D array.') else: x0 = x0[:, None] if sys.NumberOfStates != x0.size: raise ValueError('The initial condition size does not match the ' 'number of states of the model.') # Always works with State Models try: _check_for_state(sys) except ValueError: sys = transfer_to_state(sys) n, m = sys.NumberOfStates, sys.shape[1] is_discrete = sys.SamplingSet == 'Z' u = np.asarray(u, dtype=float).squeeze() if u.ndim == 1: u = u[:, None] t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete) # input and time arrays are regular move on # Static gains are simple matrix multiplications with no x0 if sys._isgain: if sys._isSISO: yout = u * sys.d.squeeze() else: # don't bother for single inputs if m == 1: per_channel = False if per_channel: yout = np.einsum('ij,jk->ikj', u, sys.d.T) else: yout = u @ sys.d.T # Dynamic model else: # TODO: Add FOH discretization for funky input # ZOH discretize the continuous system based on the time increment if not is_discrete: sys = discretize(sys, t[1]-t[0], method='zoh') sample_num = len(u) a, b, c, d = sys.matrices # Bu and Du are constant matrices so get them ready (transposed) M_u = np.block([b.T, d.T]) at = a.T # Explicitly skip single inputs for per_channel if m == 1: per_channel = False # Shape the response as a 3D array if per_channel: xout = np.empty([sample_num, n, m], dtype=float) for col in range(m): xout[0, :, col] = 0. if x0 is None else x0.T Bu = u[:, [col]] @ b.T[[col], :] # Main loop for xdot eq. for row in range(1, sample_num): xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1] # Get the output equation for each slice of inputs # Cx + Du yout = np.einsum('ijk,jl->ilk', xout, c.T) + \ np.einsum('ij,jk->ikj', u, d.T) # Combined output else: BDu = u @ M_u xout = np.empty([sample_num, n], dtype=float) xout[0] = 0. if x0 is None else x0.T # Main loop for xdot eq. for row in range(1, sample_num): xout[row] = (xout[row-1] @ at) + BDu[row-1, :n] # Now we have all the state evolution get the output equation yout = xout @ c.T + BDu[:, n:] return yout, t def simulate_step_response(sys, t=None): """ Compute the linear model response to an Heaviside function (or all-ones array) sampled at given time instances. If the time array is omitted then a time sequence is generated based on the poles of the model. Parameters ---------- sys : {State, Transfer} The system model to be simulated t : array_like The real-valued sequence to be used for the evolution of the system. The values should be equally spaced otherwise an error is raised. For discrete time models increments different than the sampling period also raises an error. On the other hand for discrete models this can be omitted and a time sequence will be generated automatically. Returns ------- yout : ndarray The resulting response array. The array is 1D if sys is SISO and has p columns if sys has p outputs. If there are also m inputs the array is 3D array with the shape (<num of samples>, p, m) tout : ndarray The time sequence used in the simulation. If the parameter t is not None then a copy of t is given. 
""" _check_for_state_or_transfer(sys) # Always works with State Models try: _check_for_state(sys) except ValueError: sys = transfer_to_state(sys) if t is None: tf, ts = _compute_tfinal_and_dt(sys) t = np.arange(0, tf+ts, ts, dtype=float) else: t, ts = _check_custom_time_input(t) m = sys.shape[1] u = np.ones([len(t), m], dtype=float) return simulate_linear_system(sys, u=u, t=t, per_channel=1) def simulate_impulse_response(sys, t=None): """ Compute the linear model response to an Dirac delta pulse (or all-zeros array except the first sample being 1/dt at each channel) sampled at given time instances. If the time array is omitted then a time sequence is generated based on the poles of the model. Parameters ---------- sys : {State, Transfer} The system model to be simulated t : array_like The real-valued sequence to be used for the evolution of the system. The values should be equally spaced otherwise an error is raised. For discrete time models increments different than the sampling period also raises an error. On the other hand for discrete models this can be omitted and a time sequence will be generated automatically. Returns ------- yout : ndarray The resulting response array. The array is 1D if sys is SISO and has p columns if sys has p outputs. If there are also m inputs the array is 3D array with the shape (<num of samples>, p, m) tout : ndarray The time sequence used in the simulation. If the parameter t is not None then a copy of t is given. """ _check_for_state_or_transfer(sys) # Always works with State Models try: _check_for_state(sys) except ValueError: sys = transfer_to_state(sys) if t is None: tf, ts = _compute_tfinal_and_dt(sys, is_step=False) t = np.arange(0, tf+ts, ts, dtype=float) else: t, ts = _check_custom_time_input(t) m = sys.shape[1] u = np.zeros([len(t), m], dtype=float) u[0] = 1./ts return simulate_linear_system(sys, u=u, t=t, per_channel=1) def _compute_tfinal_and_dt(sys, is_step=True): """ Helper function to estimate a final time and a sampling period for time domain simulations. It is essentially geared towards impulse response but is also used for step responses. For discrete-time models, obviously dt is inherent and only tfinal is computed. Parameters ---------- sys : {State, Transfer} The system to be investigated is_step : bool Scales the dc value by the magnitude of the nonzero mode since integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ. Default is True. Returns ------- tfinal : float The final time instance for which the simulation will be performed. dt : float The estimated sampling period for the simulation. Notes ----- Just by evaluating the fastest mode for dt and slowest for tfinal often leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000])) since dt will be very small and tfinal will be too large though the fast mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0] and the simulation would be unnecessarily long and the plot is virtually an L shape since the decay is so fast. Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR can be used such that only the modes that have significant effect on the time response are taken. But the sensitivity of the eigenvalues complicate the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work with simple poles with this formulation. See Golub, Van Loan Section 7.2.2 for simple eigenvalue sensitivity about the nonunity of <w,v>. 
The size of the response is dependent on the size of the eigenshapes rather than the eigenvalues themselves. """ sqrt_eps = np.sqrt(np.spacing(1.)) min_points = 100 # min number of points min_points_z = 20 # min number of points max_points = 10000 # max number of points max_points_z = 75000 # max number of points for discrete models default_tfinal = 5 # Default simulation horizon total_cycles = 5 # number of cycles for oscillating modes pts_per_cycle = 25 # Number of points divide a period of oscillation log_decay_percent = np.log(100) # Factor of reduction for real pole decays # if a static model is given, don't bother with checks if sys._isgain: if sys._isdiscrete: return sys._dt*min_points_z, sys._dt else: return default_tfinal, default_tfinal / min_points if sys._isdiscrete: # System already has sampling fixed hence we can't fall into the same # trap mentioned above. Just get nonintegrating slow modes together # with the damping. dt = sys._dt tfinal = default_tfinal p = eigvals(sys.a) # Array Masks # unstable m_u = (np.abs(p) >= 1 + sqrt_eps) p_u, p = p[m_u], p[~m_u] if p_u.size > 0: m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps) t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt)) tfinal = max(tfinal, t_emp) # zero - negligible effect on tfinal m_z = np.abs(p) < sqrt_eps p = p[~m_z] # Negative reals- treated as oscillary mode m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps) p_nr, p = p[m_nr], p[~m_nr] if p_nr.size > 0: t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real)) tfinal = max(tfinal, t_emp) # discrete integrators m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps) p_int, p = p[m_int], p[~m_int] # pure oscillatory modes m_w = (np.abs(np.abs(p) - 1) < sqrt_eps) p_w, p = p[m_w], p[~m_w] if p_w.size > 0: t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min() tfinal = max(tfinal, t_emp) if p.size > 0: t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min() tfinal = max(tfinal, t_emp) if p_int.size > 0: tfinal = tfinal * 5 # Make tfinal an integer multiple of dt num_samples = tfinal // dt if num_samples > max_points_z: tfinal = dt * max_points_z else: tfinal = dt * num_samples return tfinal, dt # Improve conditioning via balancing and zeroing tiny entries # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance b, (sca, perm) = matrix_balance(sys.a, separate=True) p, l, r = eig(b, left=True, right=True) # Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12) # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12) eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real)) eig_sens = minimum(1e12, eig_sens) # Tolerances p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0. # Incorporate balancing to outer factors l[perm, :] *= reciprocal(sca)[:, None] r[perm, :] *= sca[:, None] w, v = sys.c @ r, l.T.conj() @ sys.b origin = False # Computing the "size" of the response of each simple mode wn = np.abs(p) if np.any(wn == 0.): origin = True dc = zeros_like(p, dtype=float) # well-conditioned nonzero poles, np.abs just in case ok = np.abs(eig_sens) <= 1/sqrt_eps # the averaged t→∞ response of each simple λ on each i/o channel # See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is # R/L eigenvector dependent) dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok] dc[wn != 0.] /= wn[wn != 0] if is_step else 1. dc[wn == 0.] = 0. # double the oscillating mode magnitude for the conjugate dc[p.imag != 0.] 
*= 2 # Now get rid of noncontributing integrators and simple modes if any relevance = (dc > 0.1*dc.max()) | ~ok psub = p[relevance] wnsub = wn[relevance] tfinal, dt = [], [] ints = wnsub == 0. iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps) # Pure imaginary? if np.any(iw): tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist() dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist() # The rest ~ts = log(%ss value) / exp(Re(λ)t) texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real) tfinal += texp_mode.tolist() dt += minimum(texp_mode / 50, (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist() # All integrators? if len(tfinal) == 0: return default_tfinal*5, default_tfinal*5/min_points tfinal = np.max(tfinal)*(5 if origin else 1) dt = np.min(dt) dt = tfinal / max_points if tfinal // dt > max_points else dt tfinal = dt * min_points if tfinal // dt < min_points else tfinal return tfinal, dt def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete): """ Helper function to validate the input arguments for simulate_linear_system """ # Discrete models can omit t array, make one here for convenience if t is None: if not isdiscrete: raise ValueError('Continuous time models need an evenly spaced ' 'time sequence from which the sampling period ' 'will be obtained.') else: u_samples = len(u) t = np.linspace(0, (u_samples-1)*dt, num=u_samples) else: t = np.asarray(t, dtype=float).squeeze() if t.ndim != 1: raise ValueError('Time array needs to be a 1D array.') t_diff = np.diff(t) if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.: raise ValueError('Time array should be equally spaced and ' 'increasing.') if isdiscrete and not np.isclose(dt, t_diff[0]): raise ValueError('Time array increment {} is not equal to the' ' model sampling period {}.'.format(t_diff[0], dt)) if u.size < 1: raise ValueError('The input array should at least have one point.') # First dimension is always # of samples if len(u) != len(t): raise ValueError('The input and time arrays should have the same' ' length. t: {} vs. u: {}'.format(t.shape, u.shape)) if u.shape[1] != m: raise ValueError('Number of input columns ({}) don\'t match the number' ' of inputs ({}) of the given model.' ''.format(u.shape[1], m)) return t def _check_custom_time_input(t): """ Helper function for simple and rather expensive checks for sanity """ t = atleast_1d(t) if t.ndim > 1: t = squeeze(t) if t.ndim > 1: raise ValueError('Time array should be a 1D array but has ' '{} nontrivial dimensions'.format(t.ndim)) if t.size < 2: raise ValueError('Time array should have at least two data points.') dt = t[1] - t[0] if dt <= 0.: raise ValueError('The time increment dt cannot be negative; ' 'Difference of the first two samples t1 - t0 = {}' ''.format(dt)) # np.diff is somewhat slower than the diff of the views if not np.allclose(t[1:] - t[:-1], dt): raise ValueError('Supplied time array is not numerically equally ' 'spaced (checked via numpy.allclose).') return t, dt
harold/_time_domain.py
18,814
Helper function for simple and rather expensive checks for sanity Helper function to validate the input arguments for simulate_linear_system Helper function to estimate a final time and a sampling period for time domain simulations. It is essentially geared towards impulse response but is also used for step responses. For discrete-time models, obviously dt is inherent and only tfinal is computed. Parameters ---------- sys : {State, Transfer} The system to be investigated is_step : bool Scales the dc value by the magnitude of the nonzero mode since integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ. Default is True. Returns ------- tfinal : float The final time instance for which the simulation will be performed. dt : float The estimated sampling period for the simulation. Notes ----- Just by evaluating the fastest mode for dt and slowest for tfinal often leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000])) since dt will be very small and tfinal will be too large though the fast mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0] and the simulation would be unnecessarily long and the plot is virtually an L shape since the decay is so fast. Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR can be used such that only the modes that have significant effect on the time response are taken. But the sensitivity of the eigenvalues complicate the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work with simple poles with this formulation. See Golub, Van Loan Section 7.2.2 for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of the response is dependent on the size of the eigenshapes rather than the eigenvalues themselves. Compute the linear model response to an Dirac delta pulse (or all-zeros array except the first sample being 1/dt at each channel) sampled at given time instances. If the time array is omitted then a time sequence is generated based on the poles of the model. Parameters ---------- sys : {State, Transfer} The system model to be simulated t : array_like The real-valued sequence to be used for the evolution of the system. The values should be equally spaced otherwise an error is raised. For discrete time models increments different than the sampling period also raises an error. On the other hand for discrete models this can be omitted and a time sequence will be generated automatically. Returns ------- yout : ndarray The resulting response array. The array is 1D if sys is SISO and has p columns if sys has p outputs. If there are also m inputs the array is 3D array with the shape (<num of samples>, p, m) tout : ndarray The time sequence used in the simulation. If the parameter t is not None then a copy of t is given. Compute the linear model response to an input array sampled at given time instances. Parameters ---------- sys : {State, Transfer} The system model to be simulated u : array_like The real-valued input sequence to force the model. 1D arrays for single input models and 2D arrays that has as many columns as the number of inputs are valid inputs. t : array_like, optional The real-valued sequence to be used for the evolution of the system. The values should be equally spaced otherwise an error is raised. For discrete time models increments different than the sampling period also raises an error. On the other hand for discrete models this can be omitted and a time sequence will be generated automatically. x0 : array_like, optional The initial condition array. 
If omitted an array of zeros is assumed. Note that Transfer models by definition assume zero initial conditions and will raise an error. per_channel : bool, optional If this is set to True and if the system has multiple inputs, the response of each input is returned individually. For example, if a system has 4 inputs and 3 outputs then the response shape becomes (num, p, m) instead of (num, p) where k-th slice (:, :, k) is the response from the k-th input channel. For single input systems, this keyword has no effect. Returns ------- yout : ndarray The resulting response array. The array is 1D if sys is SISO and has p columns if sys has p outputs. tout : ndarray The time sequence used in the simulation. If the parameter t is not None then a copy of t is given. Notes ----- For Transfer models, first conversion to a state model is performed and then the resulting model is used for computations. Compute the linear model response to an Heaviside function (or all-ones array) sampled at given time instances. If the time array is omitted then a time sequence is generated based on the poles of the model. Parameters ---------- sys : {State, Transfer} The system model to be simulated t : array_like The real-valued sequence to be used for the evolution of the system. The values should be equally spaced otherwise an error is raised. For discrete time models increments different than the sampling period also raises an error. On the other hand for discrete models this can be omitted and a time sequence will be generated automatically. Returns ------- yout : ndarray The resulting response array. The array is 1D if sys is SISO and has p columns if sys has p outputs. If there are also m inputs the array is 3D array with the shape (<num of samples>, p, m) tout : ndarray The time sequence used in the simulation. If the parameter t is not None then a copy of t is given. Quick initial condition checks Always works with State Models input and time arrays are regular move on Static gains are simple matrix multiplications with no x0 don't bother for single inputs Dynamic model TODO: Add FOH discretization for funky input ZOH discretize the continuous system based on the time increment Bu and Du are constant matrices so get them ready (transposed) Explicitly skip single inputs for per_channel Shape the response as a 3D array Main loop for xdot eq. Get the output equation for each slice of inputs Cx + Du Combined output Main loop for xdot eq. Now we have all the state evolution get the output equation Always works with State Models Always works with State Models min number of points min number of points max number of points max number of points for discrete models Default simulation horizon number of cycles for oscillating modes Number of points divide a period of oscillation Factor of reduction for real pole decays if a static model is given, don't bother with checks System already has sampling fixed hence we can't fall into the same trap mentioned above. Just get nonintegrating slow modes together with the damping. 
Array Masks unstable zero - negligible effect on tfinal Negative reals- treated as oscillary mode discrete integrators pure oscillatory modes Make tfinal an integer multiple of dt Improve conditioning via balancing and zeroing tiny entries See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12) G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12) Tolerances Incorporate balancing to outer factors Computing the "size" of the response of each simple mode well-conditioned nonzero poles, np.abs just in case the averaged t→∞ response of each simple λ on each i/o channel See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is R/L eigenvector dependent) double the oscillating mode magnitude for the conjugate Now get rid of noncontributing integrators and simple modes if any Pure imaginary? The rest ~ts = log(%ss value) / exp(Re(λ)t) All integrators? Discrete models can omit t array, make one here for convenience First dimension is always of samples np.diff is somewhat slower than the diff of the views
7,934
en
0.850075
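A hedged usage sketch for the simulation helpers above, assuming the harold package is installed and lays out this module as the path shows; omitting t lets _compute_tfinal_and_dt pick the horizon and sampling period from the poles.

# Minimal sketch: step and impulse response of a lightly damped second-order system.
import numpy as np
from harold import Transfer
from harold._time_domain import simulate_step_response, simulate_impulse_response

G = Transfer([1.], [1., 0.4, 1.])          # 1 / (s^2 + 0.4 s + 1)

y_step, t_step = simulate_step_response(G)     # t omitted -> horizon chosen from the poles
y_imp, t_imp = simulate_impulse_response(G)

print('step samples:', y_step.shape, 'dt =', t_step[1] - t_step[0])
print('final step value ~', float(np.squeeze(y_step[-1])))   # should settle near the DC gain of 1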
#
#   Copyright 2020 Logical Clocks AB
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#

from hsfs import util
from hsfs.constructor import query

import humps


class Join:
    INNER = "INNER"
    LEFT = "LEFT"
    RIGHT = "RIGHT"
    FULL = "FULL"
    CROSS = "CROSS"
    LEFT_SEMI_JOIN = "LEFT_SEMI_JOIN"
    COMMA = "COMMA"

    def __init__(self, query, on, left_on, right_on, join_type, prefix):
        self._query = query
        self._on = util.parse_features(on)
        self._left_on = util.parse_features(left_on)
        self._right_on = util.parse_features(right_on)
        self._join_type = join_type or self.INNER
        self._prefix = prefix

    def to_dict(self):
        return {
            "query": self._query,
            "on": self._on,
            "leftOn": self._left_on,
            "rightOn": self._right_on,
            "type": self._join_type,
            "prefix": self._prefix,
        }

    @classmethod
    def from_response_json(cls, json_dict):
        json_decamelized = humps.decamelize(json_dict)
        return cls(
            query=query.Query.from_response_json(json_decamelized["query"]),
            on=json_decamelized.get("on", None),
            left_on=json_decamelized.get("left_on", None),
            right_on=json_decamelized.get("right_on", None),
            join_type=json_decamelized.get("join_type", None),
            prefix=json_decamelized.get("prefix", None),
        )

    @property
    def query(self):
        return self._query

    @query.setter
    def query(self, query):
        self._query = query
python/hsfs/constructor/join.py
2,092
Copyright 2020 Logical Clocks AB Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
572
en
0.862541
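An illustrative sketch of how the Join wrapper serializes its arguments; fg_query is a hypothetical, already-constructed hsfs Query (for example from feature_group.select_all()), and the customer_id key name is made up for the example.

# Illustrative sketch: how the Join wrapper serializes its arguments.
# `fg_query` stands in for a real hsfs Query object (not constructed here).
from hsfs.constructor.join import Join

join = Join(
    query=fg_query,        # hypothetical existing Query object
    on=["customer_id"],    # same-name join key on both sides
    left_on=[],
    right_on=[],
    join_type=None,        # falls back to Join.INNER
    prefix="right_",
)

payload = join.to_dict()
print(payload["type"])     # "INNER"
print(payload["prefix"])   # "right_"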
import tensorflow as tf import tensorflow.contrib.slim as slim from tflearn.layers.conv import global_avg_pool ####################### # 3d functions ####################### # convolution # 3D unet graph def unet(inputI, output_channel): """3D U-net""" phase_flag = 1 concat_dim = 4 conv1_1 = conv3d( input=inputI, output_chn=64, kernel_size=3, stride=1, use_bias=False, name='conv1') # conv1_1 (1, 96, 96, 96, 64) conv1_bn = tf.contrib.layers.batch_norm( conv1_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv1_batch_norm") conv1_relu = tf.nn.relu(conv1_bn, name='conv1_relu') pool1_in = tf.layers.max_pooling3d( inputs=conv1_relu, pool_size=2, strides=2, name='pool1') # pool1 (1, 48, 48, 48, 64) # pool1_frac = fractal_net( # is_global_path_list[0], # global_path_list[0], # local_path_list[0], # self.Blocks, # self.Columns)(pool1_in) # pool1_old = pool1_in + pool1_frac pool1 = pool1_in conv2_1 = conv3d( input=pool1, output_chn=128, kernel_size=3, stride=1, use_bias=False, name='conv2') # (1, 48, 48, 48, 128) conv2_bn = tf.contrib.layers.batch_norm( conv2_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv2_batch_norm") conv2_relu = tf.nn.relu(conv2_bn, name='conv2_relu') pool2_in = tf.layers.max_pooling3d( inputs=conv2_relu, pool_size=2, strides=2, name='pool2') # pool2 (1, 24, 24, 24, 128) # pool2_frac = fractal_net( # is_global_path_list[1], # global_path_list[1], # local_path_list[1], # self.Blocks, # self.Columns)(pool2_in) # pool2 = pool2_in + pool2_frac pool2 = pool2_in conv3_1 = conv3d( input=pool2, output_chn=256, kernel_size=3, stride=1, use_bias=False, name='conv3a') # (1, 24, 24, 24, 256) conv3_1_bn = tf.contrib.layers.batch_norm( conv3_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv3_1_batch_norm") conv3_1_relu = tf.nn.relu(conv3_1_bn, name='conv3_1_relu') conv3_2 = conv3d( input=conv3_1_relu, output_chn=256, kernel_size=3, stride=1, use_bias=False, name='conv3b') # (1, 24, 24, 24, 256) conv3_2 = conv3_2 + conv3_1 conv3_2_bn = tf.contrib.layers.batch_norm( conv3_2, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv3_2_batch_norm") conv3_2_relu = tf.nn.relu(conv3_2_bn, name='conv3_2_relu') pool3_in = tf.layers.max_pooling3d( inputs=conv3_2_relu, pool_size=2, strides=2, name='pool3') # pool3 (1, 12, 12, 12, 256) # pool3_frac = fractal_net( # is_global_path_list[2], # global_path_list[2], # local_path_list[2], # self.Blocks, # self.Columns)(pool3_in) pool3 = pool3_in # pool3 = pool3_in + pool3_frac conv4_1 = conv3d( input=pool3, output_chn=512, kernel_size=3, stride=1, use_bias=False, name='conv4a') # conv4_1 (1, 12, 12, 12, 512) conv4_1_bn = tf.contrib.layers.batch_norm( conv4_1, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv4_1_batch_norm") conv4_1_relu = tf.nn.relu(conv4_1_bn, name='conv4_1_relu') conv4_2 = conv3d( input=conv4_1_relu, output_chn=512, kernel_size=3, stride=1, use_bias=False, name='conv4b') conv4_2 = conv4_2 + conv4_1 # conv4_2 (1, 12, 12, 12, 512) conv4_2_bn = tf.contrib.layers.batch_norm( conv4_2, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=phase_flag, scope="conv4_2_batch_norm") conv4_2_relu = tf.nn.relu(conv4_2_bn, name='conv4_2_relu') pool4 = tf.layers.max_pooling3d( inputs=conv4_2_relu, pool_size=2, strides=2, name='pool4') # pool4 (1, 6, 6, 6, 512) conv5_1 = conv_bn_relu( 
input=pool4, output_chn=512, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='conv5_1') # conv5_1 (1, 6, 6, 6, 512) conv5_2 = conv_bn_relu( input=conv5_1, output_chn=512, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='conv5_2') # conv5_2 (1, 6, 6, 6, 512) deconv1_1 = deconv_bn_relu( input=conv5_2, output_chn=512, is_training=phase_flag, name='deconv1_1') # (1, 12, 12, 12, 512) concat_1 = tf.concat([deconv1_1, conv4_2], axis=concat_dim, name='concat_1') # (1, 12, 12, 12, 1024) deconv1_2_in = conv_bn_relu( input=concat_1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv1_2') # deconv1_2_frac = fractal_net( # is_global_path_list[3], # global_path_list[3], # local_path_list[3], # self.Blocks, # self.Columns)(deconv1_2_in) deconv1_2 = deconv1_2_in # deconv1_2 = deconv1_2_in + deconv1_2_frac # (1, 12, 12, 12, 256) deconv2_1 = deconv_bn_relu( input=deconv1_2, output_chn=256, is_training=phase_flag, name='deconv2_1') concat_2 = tf.concat([deconv2_1, conv3_2], axis=concat_dim, name='concat_2') # deconv2_2 (1, 24, 24, 24, 512) deconv2_2_in = conv_bn_relu( input=concat_2, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv2_2') # deconv2_2_frac = fractal_net( # is_global_path_list[4], # global_path_list[4], # local_path_list[4], # self.Blocks, # self.Columns)(deconv2_2_in) deconv2_2 = deconv2_2_in # deconv2_2 = deconv2_2_in + deconv2_2_frac # deconv2_2 (1, 24, 24, 24, 128) deconv3_1 = deconv_bn_relu( input=deconv2_2, output_chn=128, is_training=phase_flag, name='deconv3_1') # deconv3_1 (1, 48, 48, 48, 128) concat_3 = tf.concat([deconv3_1, conv2_1], axis=concat_dim, name='concat_3') # deconv3_1 (1, 48, 48, 48, 256) deconv3_2_in = conv_bn_relu( input=concat_3, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv3_2') # deconv3_2_frac = fractal_net( # is_global_path_list[5], # global_path_list[5], # local_path_list[5], # self.Blocks, # self.Columns)(deconv3_2_in) deconv3_2 = deconv3_2_in # deconv3_2 = deconv3_2_in + deconv3_2_frac # deconv3_2 (1, 48, 48, 48, 64) deconv4_1 = deconv_bn_relu( input=deconv3_2, output_chn=64, is_training=phase_flag, name='deconv4_1') # deconv4_2 (1, 96, 96, 96, 32) concat_4 = tf.concat([deconv4_1, conv1_1], axis=concat_dim, name='concat_4') # deconv4_2 (1, 96, 96, 96, 64) deconv4_2 = conv_bn_relu( input=concat_4, output_chn=32, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv4_2') # deconv4_2 (1, 96, 96, 96, 32) pre_pro = conv3d( input=deconv4_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='pre_pro') # pred_frac = fractal_net(is_global_path_list[3],global_path_list[3],local_path_list[3],self.Blocks,self.Columns)(pre_pro) pred_prob = pre_pro # pred_prob (1, 96, 96, 96, 8) Here get the prediction # ======================For predicition============================= # auxiliary prediction 0 aux0_conv = conv3d( input=deconv1_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux0_conv') # aux0_conv (1, 12, 12, 12, 8) 8 class output aux0_deconv_1 = Deconv3d( input=aux0_conv, output_chn=output_channel, name='aux0_deconv_1') # aux0_deconv_1 (1, 24, 24, 24, 8) aux0_deconv_2 = Deconv3d( input=aux0_deconv_1, output_chn=output_channel, name='aux0_deconv_2') # aux0_deconv_2 (1, 48, 48, 48, 8) aux0_prob = Deconv3d( input=aux0_deconv_2, output_chn=output_channel, name='aux0_prob') # aux0_prob (1, 96, 96, 96, 8) # auxiliary prediction 1 
aux1_conv = conv3d( input=deconv2_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux1_conv') # aux1_conv (1, 24, 24, 24, 8) aux1_deconv_1 = Deconv3d( input=aux1_conv, output_chn=output_channel, name='aux1_deconv_1') # aux1_deconv_1 (1, 48, 48, 48, 8) aux1_prob = Deconv3d( input=aux1_deconv_1, output_chn=output_channel, name='aux1_prob') # aux1_prob (1, 96, 96, 96, 8) # auxiliary prediction 2 aux2_conv = conv3d( input=deconv3_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux2_conv') # aux2_conv (1, 48, 48, 48, 8) aux2_prob = Deconv3d( input=aux2_conv, output_chn=output_channel, name='aux2_prob') # aux2_prob (1, 96, 96, 96, 8) soft_prob = tf.nn.softmax(pred_prob, name='pred_soft') pred_label = tf.argmax(soft_prob, axis=4, name='argmax') return pred_prob, pred_label, aux0_prob, aux1_prob, aux2_prob def unet_resnet(input_pred, input_img, output_channel, stage): input_shape = input_img.shape input_channel = input_shape.dims[-1].value input_pred_softmax = tf.nn.softmax(input_pred, name='softmax_ss' + stage) forground_input_pred = tf.expand_dims(input_pred_softmax[:, :, :, :, 1], axis=-1) input_concat = tf.concat([forground_input_pred, input_img], axis=-1) # (1, 96, 96, 96, 2) input_attention = forground_input_pred * input_img # (1, 96, 96, 96, input_channel) # conv block1 conv_bn_1_1 = conv_bn_relu(input=input_attention, output_chn=16, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block1_conv1') input_cat = tf.concat([input_attention, input_attention, input_attention, input_attention, input_attention, input_attention, input_attention, input_attention], axis=-1) # diffirence for odd input or even input if input_channel % 2 == 0 or input_channel == 1: input_tile = tf.tile(input=input_attention, multiples=[1, 1, 1, 1, int(16/input_channel)], name='tile' + stage) else: input_tile = tf.tile(input=input_attention, multiples=[1, 1, 1, 1, int(16/(input_channel-1))], name='tile' + stage) input_tile = input_tile[:,:,:,:,0:16] conv_bn_skip_1_1 = input_tile + conv_bn_1_1 pool1_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_1_1, pool_size=2, strides=2, name=stage + 'pool_1_1') # conv block2 conv_bn_2_1 = conv_bn_relu(input=pool1_1, output_chn=32, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block2_conv1') conv_bn_2_2 = conv_bn_relu(input=conv_bn_2_1, output_chn=32, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block2_conv2') pool1_1_cat = tf.concat([pool1_1, pool1_1], axis=-1) conv_bn_skip_2_1 = pool1_1_cat + conv_bn_2_2 pool_2_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_2_1, pool_size=2, strides=2, name=stage + 'pool2_2') # conv block3 conv_bn_3_1 = conv_bn_relu(input=pool_2_1, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block3_conv1') conv_bn_3_2 = conv_bn_relu(input=conv_bn_3_1, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block3_conv2') conv_bn_3_3 = conv_bn_relu(input=conv_bn_3_2, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block3_conv3') pool_2_1_cat = tf.concat([pool_2_1, pool_2_1], axis=-1) conv_bn_skip_3_1 = conv_bn_3_3 + pool_2_1_cat pool3_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_3_1, pool_size=2, strides=2, name=stage + 'pool3_1') # conv block4 conv_bn_4_1 = conv_bn_relu(input=pool3_1, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block4_conv1') conv_bn_4_2 = 
conv_bn_relu(input=conv_bn_4_1, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block4_conv2') conv_bn_4_3 = conv_bn_relu(input=conv_bn_4_2, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block4_conv3') pool3_1_cat = tf.concat([pool3_1, pool3_1], axis=-1) conv_bn_skip_4_1 = conv_bn_4_3 + pool3_1_cat pool4_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_4_1, pool_size=2, strides=2, name=stage + 'pool4_1') # conv block5 conv_bn_5_1 = conv_bn_relu(input=pool4_1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block5_conv1') conv_bn_5_2 = conv_bn_relu(input=conv_bn_5_1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block5_conv2') conv_bn_5_3 = conv_bn_relu(input=conv_bn_5_2, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block5_conv3') pool4_1_cat = tf.concat([pool4_1, pool4_1], axis=-1) conv_bn_skip_5_1 = conv_bn_5_3 + pool4_1_cat # upsampling conv block6 deconv_bn_1_1 = deconv_bn_relu(input=conv_bn_skip_5_1, output_chn=128, is_training=True, name=stage + 'deconv_1_1') concat1 = tf.concat([deconv_bn_1_1, conv_bn_skip_4_1], axis=-1, name=stage + 'concat1') conv_bn_6_1 = conv_bn_relu(input=concat1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block6_conv1') conv_bn_6_2 = conv_bn_relu(input=conv_bn_6_1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block6_conv2') conv_bn_6_3 = conv_bn_relu(input=conv_bn_6_2, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'blovk6_conv3') deconv_bn_1_1_cat = tf.concat([deconv_bn_1_1, deconv_bn_1_1], axis=-1) conv_bn_skip_6_1 = conv_bn_6_3 + deconv_bn_1_1_cat # conv block7 deconv_bn_2_1 = deconv_bn_relu(input=conv_bn_skip_6_1, output_chn=64, is_training=True, name=stage + 'deconv_2_1') concat2 = tf.concat([deconv_bn_2_1, conv_bn_skip_3_1], axis=-1, name=stage + 'concat2') conv_bn_7_1 = conv_bn_relu(input=concat2, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block7_conv1') conv_bn_7_2 = conv_bn_relu(input=conv_bn_7_1, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block7_conv2') conv_bn_7_3 = conv_bn_relu(input=conv_bn_7_2, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block7_conv3') deconv_bn_2_1_cat = tf.concat([deconv_bn_2_1, deconv_bn_2_1], axis=-1) conv_bn_skip_7_1 = conv_bn_7_3 + deconv_bn_2_1_cat # conv block8 deconv_bn_3_1 = deconv_bn_relu(input=conv_bn_skip_7_1, output_chn=32, is_training=True, name=stage + 'deconv_3_1') concat3 = tf.concat([deconv_bn_3_1, conv_bn_skip_2_1], axis=-1, name=stage + 'concat3') conv_bn_8_1 = conv_bn_relu(input=concat3, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block8_conv1') conv_bn_8_2 = conv_bn_relu(input=conv_bn_8_1, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=True, name=stage + 'block8_conv2') deconv_bn_3_1_cat = tf.concat([deconv_bn_3_1, deconv_bn_3_1], axis=-1) conv_bn_skip_8_1 = conv_bn_8_2 + deconv_bn_3_1_cat # conv block9 deconv_bn_4_1 = deconv_bn_relu(input=conv_bn_skip_8_1, output_chn=16, is_training=True, name=stage + 'deconv_4_1') concat4 = tf.concat([deconv_bn_4_1, conv_bn_skip_1_1], axis=-1, name=stage + 'conca4_1') conv_bn_9_1 = conv_bn_relu(input=concat4, output_chn=32, kernel_size=3, 
stride=1, use_bias=False, is_training=True, name=stage + 'block9_conv1') deconv_bn_4_1_cat = tf.concat([deconv_bn_4_1, deconv_bn_4_1], axis=-1) conv_bn_skip_9_1 = conv_bn_9_1 + deconv_bn_4_1_cat # prediction layer pred = conv3d(input=conv_bn_skip_9_1, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name=stage + 'pred') soft_prob_v = tf.nn.softmax(pred, name='pred_soft_v') pred_label_v = tf.argmax(soft_prob_v, axis=4, name='argmax_v') return pred, pred_label_v def conv3d( input, output_chn, kernel_size, stride, use_bias=False, name='conv'): return tf.layers.conv3d( inputs=input, filters=output_chn, kernel_size=kernel_size, strides=stride, padding='same', data_format='channels_last', kernel_initializer=tf.truncated_normal_initializer( 0.0, 0.01), kernel_regularizer=slim.l2_regularizer(0.0005), use_bias=use_bias, name=name) def conv_bn_relu( input, output_chn, kernel_size, stride, use_bias, is_training, name): with tf.variable_scope(name): conv = conv3d( input, output_chn, kernel_size, stride, use_bias, name='conv') bn = tf.contrib.layers.batch_norm( conv, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training, scope="batch_norm") relu = tf.nn.relu(bn, name='relu') return relu # deconvolution def Deconv3d(input, output_chn, name): batch, in_depth, in_height, in_width, in_channels = [ int(d) for d in input.get_shape()] filter = tf.get_variable( name + "/filter", shape=[ 4, 4, 4, output_chn, in_channels], dtype=tf.float32, initializer=tf.random_normal_initializer( 0, 0.01), regularizer=slim.l2_regularizer(0.0005)) conv = tf.nn.conv3d_transpose( value=input, filter=filter, output_shape=[ batch, in_depth * 2, in_height * 2, in_width * 2, output_chn], strides=[ 1, 2, 2, 2, 1], padding="SAME", name=name) return conv def Unsample(input, output_chn, name): batch, in_depth, in_height, in_width, in_channels = [ int(d) for d in input.get_shape()] base = input.shape[-2] data = 96 / int(base) print("base shape", data) filter = tf.get_variable( name + "/filter", shape=[ 4, 4, 4, output_chn, in_channels], dtype=tf.float32, initializer=tf.random_normal_initializer( 0, 0.01), regularizer=slim.l2_regularizer(0.0005)) conv = tf.nn.conv3d_transpose( value=input, filter=filter, output_shape=[ batch, 96, 96, 96, output_chn], strides=[ 1, data, data, data, 1], padding="SAME", name=name) return conv def deconv_bn_relu(input, output_chn, is_training, name): with tf.variable_scope(name): conv = Deconv3d(input, output_chn, name='deconv') # with tf.device("/cpu:0"): bn = tf.contrib.layers.batch_norm( conv, decay=0.9, updates_collections=None, epsilon=1e-5, scale=True, is_training=is_training, scope="batch_norm") relu = tf.nn.relu(bn, name='relu') return relu def conv_bn_relu_x3( input, output_chn, kernel_size, stride, use_bias, is_training, name): with tf.variable_scope(name): z = conv_bn_relu( input, output_chn, kernel_size, stride, use_bias, is_training, "dense1") z_out = conv_bn_relu( z, output_chn, kernel_size, stride, use_bias, is_training, "dense2") z_out = conv_bn_relu( z_out, output_chn, kernel_size, stride, use_bias, is_training, "dense3") return z + z_out
src/models.py
22,145
3D U-net 3d functions convolution 3D unet graph conv1_1 (1, 96, 96, 96, 64) pool1 (1, 48, 48, 48, 64) pool1_frac = fractal_net( is_global_path_list[0], global_path_list[0], local_path_list[0], self.Blocks, self.Columns)(pool1_in) pool1_old = pool1_in + pool1_frac (1, 48, 48, 48, 128) pool2 (1, 24, 24, 24, 128) pool2_frac = fractal_net( is_global_path_list[1], global_path_list[1], local_path_list[1], self.Blocks, self.Columns)(pool2_in) pool2 = pool2_in + pool2_frac (1, 24, 24, 24, 256) (1, 24, 24, 24, 256) pool3 (1, 12, 12, 12, 256) pool3_frac = fractal_net( is_global_path_list[2], global_path_list[2], local_path_list[2], self.Blocks, self.Columns)(pool3_in) pool3 = pool3_in + pool3_frac conv4_1 (1, 12, 12, 12, 512) conv4_2 (1, 12, 12, 12, 512) pool4 (1, 6, 6, 6, 512) conv5_1 (1, 6, 6, 6, 512) conv5_2 (1, 6, 6, 6, 512) (1, 12, 12, 12, 512) (1, 12, 12, 12, 1024) deconv1_2_frac = fractal_net( is_global_path_list[3], global_path_list[3], local_path_list[3], self.Blocks, self.Columns)(deconv1_2_in) deconv1_2 = deconv1_2_in + deconv1_2_frac (1, 12, 12, 12, 256) deconv2_2 (1, 24, 24, 24, 512) deconv2_2_frac = fractal_net( is_global_path_list[4], global_path_list[4], local_path_list[4], self.Blocks, self.Columns)(deconv2_2_in) deconv2_2 = deconv2_2_in + deconv2_2_frac deconv2_2 (1, 24, 24, 24, 128) deconv3_1 (1, 48, 48, 48, 128) deconv3_1 (1, 48, 48, 48, 256) deconv3_2_frac = fractal_net( is_global_path_list[5], global_path_list[5], local_path_list[5], self.Blocks, self.Columns)(deconv3_2_in) deconv3_2 = deconv3_2_in + deconv3_2_frac deconv3_2 (1, 48, 48, 48, 64) deconv4_2 (1, 96, 96, 96, 32) deconv4_2 (1, 96, 96, 96, 64) deconv4_2 (1, 96, 96, 96, 32) pred_frac = fractal_net(is_global_path_list[3],global_path_list[3],local_path_list[3],self.Blocks,self.Columns)(pre_pro) pred_prob (1, 96, 96, 96, 8) Here get the prediction ======================For predicition============================= auxiliary prediction 0 aux0_conv (1, 12, 12, 12, 8) 8 class output aux0_deconv_1 (1, 24, 24, 24, 8) aux0_deconv_2 (1, 48, 48, 48, 8) aux0_prob (1, 96, 96, 96, 8) auxiliary prediction 1 aux1_conv (1, 24, 24, 24, 8) aux1_deconv_1 (1, 48, 48, 48, 8) aux1_prob (1, 96, 96, 96, 8) auxiliary prediction 2 aux2_conv (1, 48, 48, 48, 8) aux2_prob (1, 96, 96, 96, 8) (1, 96, 96, 96, 2) (1, 96, 96, 96, input_channel) conv block1 diffirence for odd input or even input conv block2 conv block3 conv block4 conv block5 upsampling conv block6 conv block7 conv block8 conv block9 prediction layer deconvolution with tf.device("/cpu:0"):
2,662
en
0.476453
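A hedged graph-construction sketch for the unet function above; it assumes a TensorFlow 1.x environment with tf.contrib and tflearn available (the code will not run on TF 2 without compatibility shims) and that src/models.py is importable as models. Shapes follow the comments embedded in the code.

# Graph-construction sketch only (TF 1.x style); shapes follow the in-code comments.
import tensorflow as tf
from models import unet   # assumes src/models.py is on the import path as `models`

tf.reset_default_graph()
volume = tf.placeholder(tf.float32, shape=[1, 96, 96, 96, 1], name='input_volume')

# 8 output classes, matching the "(1, 96, 96, 96, 8)" comments.
pred_prob, pred_label, aux0, aux1, aux2 = unet(volume, output_channel=8)

print(pred_prob.shape)    # (1, 96, 96, 96, 8)
print(pred_label.shape)   # (1, 96, 96, 96)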
#!/usr/bin/env python2 # -*- encoding: utf-8 -*- import pygame import sys import numpy as np CONST_LOCK_FILE = "lock.txt" #CONST_GRAPH_FILE = "../tsptours/graph.tsp" CONST_GRAPH_FILE = "graph.tsp" CONST_STOP = "STOP" CONST_CUSTOM_FILE = None def main(): pygame.init() screen = pygame.display.set_mode((700,700)) screen.fill((255,255,255)) pygame.display.set_caption("Ant Colony TSP Solver - press ENTER to solve") graph = [] tour = [] cost = g = 0 state = 0 pygame.display.flip() while (True): for event in pygame.event.get(): if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE): print "El usuario ha decidido cerrar la aplicación." sys.exit() elif event.type == pygame.MOUSEBUTTONDOWN and state == 0 and CONST_CUSTOM_FILE: #print "Agregando la posición del click", pygame.mouse.get_pos() data = np.loadtxt(CONST_CUSTOM_FILE, dtype=int, delimiter=',') for line in data: line = (line[0]*7, line[1]*7) graph.append(line) pygame.draw.circle(screen, (0,0,0), line, 5, 0) pygame.display.flip() from_file = False elif event.type == pygame.MOUSEBUTTONDOWN and state == 0: #print "Agregando la posición del click", pygame.mouse.get_pos() graph.append(pygame.mouse.get_pos()) pygame.draw.circle(screen, (0,0,0), pygame.mouse.get_pos(), 5, 0) pygame.display.flip() elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN: lock_file = open(CONST_LOCK_FILE, "w") lock_file.write("0"); lock_file.close() graph_file = open(CONST_GRAPH_FILE, "w") graph_file.write("NAME : %s\n" % CONST_GRAPH_FILE) graph_file.write("COMMENT : %s-city problem\n" % str(len(graph))) graph_file.write("TYPE : TSP\n") graph_file.write("DIMENSION : %s\n" % str(len(graph))) graph_file.write("EDGE_WEIGHT_TYPE : EUC_2D\n") graph_file.write("NODE_COORD_SECTION\n") for x in range(0, len(graph)): #print "%d %d %d" % (x, graph[x][0], graph[x][1]) graph_file.write("%d %d %d" % (x, graph[x][0], graph[x][1])) graph_file.write("\n") graph_file.write("EOF") graph_file.close() lock_file = open("lock.txt", "w") lock_file.write("1"); lock_file.close() # Primera salida. tour = input() # [0, .., n-1, n] cost = input() # Costo del recorrido g = input() # Cantidad de iteraciones lock_file = open("lock.txt", "w") lock_file.write("0"); lock_file.close() state = 1 if state == 1: if tour != CONST_STOP: pygame.display.set_caption("Ant Colony TSP Solver - current length: " + str(cost) + " | iterations: " + str(g) + " (SOLVING...)") screen.fill((255,255,255)) # Vuelve a dibujar los círculos for i in graph: pygame.draw.circle(screen, (255,0,0), i, 5, 0) for i in range(0, len(tour)): pygame.draw.line(screen, (255, 0, 0), graph[tour[i]], graph[tour[(i + 1) % len(tour)]]) pygame.display.flip() # Salidas siguientes tour = input() if tour != CONST_STOP: cost = input() g = input() else: pygame.display.set_caption("Ant Colony TSP Solver - current length: " + str(cost) + " | iterations: " + str(g) + " (FINISHED)") finished = True state = 2 if __name__ == '__main__': if len(sys.argv) == 2: CONST_CUSTOM_FILE = sys.argv[1] main()
graphic/tsp_matt.py
4,156
!/usr/bin/env python2 -*- encoding: utf-8 -*-CONST_GRAPH_FILE = "../tsptours/graph.tsp"print "Agregando la posición del click", pygame.mouse.get_pos()print "Agregando la posición del click", pygame.mouse.get_pos()print "%d %d %d" % (x, graph[x][0], graph[x][1]) Primera salida. [0, .., n-1, n] Costo del recorrido Cantidad de iteraciones Vuelve a dibujar los círculos Salidas siguientes
386
es
0.609246
def extractReMonsterWiki(item):
    """
    Parser for 'Re:Monster Wiki'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    if 'WATTT' in item['tags']:
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
WebMirror/management/rss_parser_funcs/feed_parse_extractReMonsterWiki.py
354
Parser for 'Re:Monster Wiki'
28
en
0.225286
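For context, a hedged sketch of the feed item shape the parser above reads; the title is hypothetical, and the helpers extractVolChapterFragmentPostfix and buildReleaseMessageWithType come from the surrounding WebMirror codebase and are not shown here.

# Hypothetical feed item; only 'title' and 'tags' are read by the parser.
item = {
    'title': 'Re:Monster Day 51-60 (part 2)',
    'tags': ['WATTT'],
}

# extractReMonsterWiki(item) would then:
#   - return None for preview posts or titles with no volume/chapter,
#   - build a 'WATTT' release message when that tag is present,
#   - return False otherwise.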
#!/usr/bin/python # coding: utf-8 -*- # (c) 2017, Wayne Witzel III <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: tower_job_template author: "Wayne Witzel III (@wwitzel3)" version_added: "2.3" short_description: create, update, or destroy Ansible Tower job template. description: - Create, update, or destroy Ansible Tower job templates. See U(https://www.ansible.com/tower) for an overview. options: name: description: - Name to use for the job template. required: True type: str description: description: - Description to use for the job template. type: str job_type: description: - The job type to use for the job template. required: False choices: ["run", "check"] type: str inventory: description: - Name of the inventory to use for the job template. type: str project: description: - Name of the project to use for the job template. required: True type: str playbook: description: - Path to the playbook to use for the job template within the project provided. required: True type: str credential: description: - Name of the credential to use for the job template. - Deprecated, mutually exclusive with 'credentials'. version_added: 2.7 type: str credentials: description: - List of credentials to use for the job template. - Will not remove any existing credentials. This may change in the future. version_added: 2.8 type: list default: [] vault_credential: description: - Name of the vault credential to use for the job template. - Deprecated, mutually exclusive with 'credential'. version_added: 2.7 type: str forks: description: - The number of parallel or simultaneous processes to use while executing the playbook. type: int limit: description: - A host pattern to further constrain the list of hosts managed or affected by the playbook type: str verbosity: description: - Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug. choices: [0, 1, 2, 3, 4] default: 0 type: int extra_vars: description: - Specify C(extra_vars) for the template. type: dict version_added: 3.7 extra_vars_path: description: - This parameter has been deprecated, please use 'extra_vars' instead. - Path to the C(extra_vars) YAML file. type: path job_tags: description: - Comma separated list of the tags to use for the job template. type: str force_handlers_enabled: description: - Enable forcing playbook handlers to run even if a task fails. version_added: 2.7 type: bool default: 'no' skip_tags: description: - Comma separated list of the tags to skip for the job template. type: str start_at_task: description: - Start the playbook at the task matching this name. version_added: 2.7 type: str diff_mode_enabled: description: - Enable diff mode for the job template. version_added: 2.7 type: bool default: 'no' fact_caching_enabled: description: - Enable use of fact caching for the job template. version_added: 2.7 type: bool default: 'no' host_config_key: description: - Allow provisioning callbacks using this host config key. type: str ask_diff_mode: description: - Prompt user to enable diff mode (show changes) to files when supported by modules. version_added: 2.7 type: bool default: 'no' ask_extra_vars: description: - Prompt user for (extra_vars) on launch. 
type: bool default: 'no' ask_limit: description: - Prompt user for a limit on launch. version_added: 2.7 type: bool default: 'no' ask_tags: description: - Prompt user for job tags on launch. type: bool default: 'no' ask_skip_tags: description: - Prompt user for job tags to skip on launch. version_added: 2.7 type: bool default: 'no' ask_job_type: description: - Prompt user for job type on launch. type: bool default: 'no' ask_verbosity: description: - Prompt user to choose a verbosity level on launch. version_added: 2.7 type: bool default: 'no' ask_inventory: description: - Prompt user for inventory on launch. type: bool default: 'no' ask_credential: description: - Prompt user for credential on launch. type: bool default: 'no' survey_enabled: description: - Enable a survey on the job template. version_added: 2.7 type: bool default: 'no' survey_spec: description: - JSON/YAML dict formatted survey definition. version_added: 2.8 type: dict required: False become_enabled: description: - Activate privilege escalation. type: bool default: 'no' concurrent_jobs_enabled: description: - Allow simultaneous runs of the job template. version_added: 2.7 type: bool default: 'no' timeout: description: - Maximum time in seconds to wait for a job to finish (server-side). type: int custom_virtualenv: version_added: "2.9" description: - Local absolute file path containing a custom Python virtualenv to use. type: str required: False default: '' state: description: - Desired state of the resource. default: "present" choices: ["present", "absent"] type: str extends_documentation_fragment: awx.awx.auth notes: - JSON for survey_spec can be found in Tower API Documentation. See U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create) for POST operation payload example. ''' EXAMPLES = ''' - name: Create tower Ping job template tower_job_template: name: "Ping" job_type: "run" inventory: "Local" project: "Demo" playbook: "ping.yml" credential: "Local" state: "present" tower_config_file: "~/tower_cli.cfg" survey_enabled: yes survey_spec: "{{ lookup('file', 'my_survey.json') }}" custom_virtualenv: "/var/lib/awx/venv/custom-venv/" ''' from ..module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode import json try: import tower_cli import tower_cli.exceptions as exc from tower_cli.conf import settings except ImportError: pass def update_fields(module, p): '''This updates the module field names to match the field names tower-cli expects to make calling of the modify/delete methods easier. 
''' params = p.copy() field_map = { 'fact_caching_enabled': 'use_fact_cache', 'ask_diff_mode': 'ask_diff_mode_on_launch', 'ask_extra_vars': 'ask_variables_on_launch', 'ask_limit': 'ask_limit_on_launch', 'ask_tags': 'ask_tags_on_launch', 'ask_skip_tags': 'ask_skip_tags_on_launch', 'ask_verbosity': 'ask_verbosity_on_launch', 'ask_inventory': 'ask_inventory_on_launch', 'ask_credential': 'ask_credential_on_launch', 'ask_job_type': 'ask_job_type_on_launch', 'diff_mode_enabled': 'diff_mode', 'concurrent_jobs_enabled': 'allow_simultaneous', 'force_handlers_enabled': 'force_handlers', } params_update = {} for old_k, new_k in field_map.items(): v = params.pop(old_k) params_update[new_k] = v extra_vars = params.get('extra_vars') extra_vars_path = params.get('extra_vars_path') if extra_vars: params_update['extra_vars'] = [json.dumps(extra_vars)] elif extra_vars_path is not None: params_update['extra_vars'] = ['@' + extra_vars_path] module.deprecate( msg='extra_vars_path should not be used anymore. Use \'extra_vars: "{{ lookup(\'file\', \'/path/to/file\') | from_yaml }}"\' instead', version="3.8" ) params.update(params_update) return params def update_resources(module, p): params = p.copy() identity_map = { 'project': 'name', 'inventory': 'name', 'credential': 'name', 'vault_credential': 'name', } for k, v in identity_map.items(): try: if params[k]: key = 'credential' if '_credential' in k else k result = tower_cli.get_resource(key).get(**{v: params[k]}) params[k] = result['id'] elif k in params: # unset empty parameters to avoid ValueError: invalid literal for int() with base 10: '' del(params[k]) except (exc.NotFound) as excinfo: module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False) return params def main(): argument_spec = dict( name=dict(required=True), description=dict(default=''), job_type=dict(choices=['run', 'check']), inventory=dict(default=''), project=dict(required=True), playbook=dict(required=True), credential=dict(default=''), vault_credential=dict(default=''), custom_virtualenv=dict(type='str', required=False), credentials=dict(type='list', default=[]), forks=dict(type='int'), limit=dict(default=''), verbosity=dict(type='int', choices=[0, 1, 2, 3, 4], default=0), extra_vars=dict(type='dict', required=False), extra_vars_path=dict(type='path', required=False), job_tags=dict(default=''), force_handlers_enabled=dict(type='bool', default=False), skip_tags=dict(default=''), start_at_task=dict(default=''), timeout=dict(type='int', default=0), fact_caching_enabled=dict(type='bool', default=False), host_config_key=dict(default=''), ask_diff_mode=dict(type='bool', default=False), ask_extra_vars=dict(type='bool', default=False), ask_limit=dict(type='bool', default=False), ask_tags=dict(type='bool', default=False), ask_skip_tags=dict(type='bool', default=False), ask_job_type=dict(type='bool', default=False), ask_verbosity=dict(type='bool', default=False), ask_inventory=dict(type='bool', default=False), ask_credential=dict(type='bool', default=False), survey_enabled=dict(type='bool', default=False), survey_spec=dict(type='dict', required=False), become_enabled=dict(type='bool', default=False), diff_mode_enabled=dict(type='bool', default=False), concurrent_jobs_enabled=dict(type='bool', default=False), state=dict(choices=['present', 'absent'], default='present'), ) module = TowerModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ ('credential', 'credentials'), ('vault_credential', 'credentials'), ('extra_vars_path', 'extra_vars'), ] 
) name = module.params.get('name') state = module.params.pop('state') json_output = {'job_template': name, 'state': state} tower_auth = tower_auth_config(module) with settings.runtime_values(**tower_auth): tower_check_mode(module) jt = tower_cli.get_resource('job_template') params = update_resources(module, module.params) params = update_fields(module, params) params['create_on_missing'] = True try: if state == 'present': result = jt.modify(**params) json_output['id'] = result['id'] elif state == 'absent': result = jt.delete(**params) except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo: module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False) cred_list = module.params.get('credentials') if cred_list: cred = tower_cli.get_resource('credential') for cred_name in cred_list: try: cred_id = cred.get(name=cred_name)['id'] r = jt.associate_credential(result['id'], cred_id) except (exc.ConnectionError, exc.BadRequest, exc.NotFound, exc.AuthError) as excinfo: module.fail_json(msg='Failed to add credential to job template: {0}'.format(excinfo), changed=False) if r.get('changed'): result['changed'] = True json_output['changed'] = result['changed'] module.exit_json(**json_output) if __name__ == '__main__': main()
awx_collection/plugins/modules/tower_job_template.py
13,457
This updates the module field names to match the field names tower-cli expects, making the modify/delete methods easier to call. !/usr/bin/python -*- coding: utf-8 -*- (c) 2017, Wayne Witzel III <[email protected]> GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) unset empty parameters to avoid ValueError: invalid literal for int() with base 10: ''
398
en
0.55826
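The update_fields step above boils down to renaming module parameters to the names tower-cli expects. A minimal standalone sketch of that renaming; the sample parameter dict and the trimmed field_map are illustrative, not the module itself:

# Sketch of the rename step performed by update_fields (subset of the real map).
field_map = {
    'fact_caching_enabled': 'use_fact_cache',
    'ask_limit': 'ask_limit_on_launch',
    'diff_mode_enabled': 'diff_mode',
}

def rename_params(params, field_map):
    params = params.copy()
    for old_key, new_key in field_map.items():
        # pop the module-side name and store the value under the tower-cli name
        params[new_key] = params.pop(old_key)
    return params

# hypothetical input, for illustration only
print(rename_params(
    {'name': 'demo-jt', 'fact_caching_enabled': True,
     'ask_limit': False, 'diff_mode_enabled': True},
    field_map))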
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.signals import post_save


# Create your models here.
class Profile(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    date_of_birth = models.DateField(blank=True, null=True)
    photo = models.ImageField(upload_to='users/%Y/%m/%d', blank=True)
    course_bookmark = models.CharField(max_length=100, default='the-strategy')
    module_bookmark = models.PositiveIntegerField(default=0)

    def __str__(self):
        return 'Profile for user {}'.format(self.user.username)


class Contact(models.Model):
    user_from = models.ForeignKey(User, related_name='rel_from_set')
    user_to = models.ForeignKey(User, related_name='rel_to_set')
    created = models.DateTimeField(auto_now_add=True, db_index=True)

    class Meta:
        ordering = ('-created',)

    def __str__(self):
        return '{} follows {}'.format(self.user_from, self.user_to)


User.add_to_class('following',
                  models.ManyToManyField('self',
                                         through=Contact,
                                         related_name='followers',
                                         symmetrical=False))


# Signal to auto-create a profile when a User is created.
def create_user_profile(sender, instance, created, **kwargs):
    if created:
        Profile.objects.create(user=instance)


post_save.connect(create_user_profile, sender=User)
account/models.py
1,364
Create your models here. Signal to auto-create a profile when a User is created.
80
en
0.946935
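A hedged usage sketch of the models above, meant for a Django shell with this app installed and migrated; the user names are placeholders. It shows the auto-created Profile and the following/followers relation that add_to_class wires through Contact:

from django.contrib.auth.models import User
from account.models import Contact

alice = User.objects.create_user('alice')
bob = User.objects.create_user('bob')

# The post_save signal above already created the profiles.
print(alice.profile)                     # Profile for user alice

# 'following' is the ManyToManyField added via add_to_class, with Contact as
# the through model, so follow rows are created on the through model directly.
Contact.objects.create(user_from=alice, user_to=bob)
print(list(alice.following.all()))       # [<User: bob>]
print(list(bob.followers.all()))         # [<User: alice>]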
# -*- coding: utf-8 -*-
from irc3.plugins.command import command


@command
def echo(bot, mask, target, args):
    """Echo command

        %%echo <words>...
    """
    yield ' '.join(args['<words>'])


@command(permission='admin', public=False)
def adduser(bot, mask, target, args):
    """Add a user

        %%adduser <name> <password>
    """
    bot.privmsg(mask.nick, 'User added')


@command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
    """Do something you don't want in !help all the time

        %%my_secret_operation
    """
    yield "I like turtles"
examples/mycommands.py
598
Add a user %%adduser <name> <password> Echo command %%echo <words>... Do something you don't want in !help all the time %%my_secret_operation -*- coding: utf-8 -*-
168
en
0.696732
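A hedged sketch of how these commands might be wired into a bot: irc3 loads command plugins through the 'includes' list of its configuration. The nick, server, and the from_config helper are assumptions based on the irc3 documentation, not part of the file above:

import irc3

config = {
    'nick': 'examplebot',            # hypothetical nick
    'host': 'irc.example.net',       # hypothetical server
    'includes': [
        'irc3.plugins.core',
        'irc3.plugins.command',
        'mycommands',                # the module shown above
    ],
}

bot = irc3.IrcBot.from_config(config)   # assumed helper; see the irc3 docs
bot.run(forever=True)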
###########################################################################
#
#  Copyright 2020 Google LLC
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
###########################################################################

from starthinker.util.project import project
from starthinker.util.salesforce import get_service


if __name__ == '__main__':
  project.from_commandline('setup')
  service = get_service()
  print('Credentials Ready: %s' % project.recipe['setup']['auth']['salesforce'])
starthinker/util/salesforce/quickstart.py
1,010
Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
557
en
0.870924
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os from metrics import power from telemetry import test from telemetry.core import util from telemetry.page import page_measurement from telemetry.page import page_set class _DromaeoMeasurement(page_measurement.PageMeasurement): def __init__(self): super(_DromaeoMeasurement, self).__init__() self._power_metric = power.PowerMetric() def CustomizeBrowserOptions(self, options): power.PowerMetric.CustomizeBrowserOptions(options) def DidNavigateToPage(self, page, tab): self._power_metric.Start(page, tab) def MeasurePage(self, page, tab, results): tab.WaitForJavaScriptExpression( 'window.document.cookie.indexOf("__done=1") >= 0', 600) self._power_metric.Stop(page, tab) self._power_metric.AddResults(tab, results) js_get_results = 'JSON.stringify(window.automation.GetResults())' print js_get_results score = eval(tab.EvaluateJavaScript(js_get_results)) def Escape(k): chars = [' ', '-', '/', '(', ')', '*'] for c in chars: k = k.replace(c, '_') return k suffix = page.url[page.url.index('?') + 1 : page.url.index('&')] for k, v in score.iteritems(): data_type = 'unimportant' if k == suffix: data_type = 'default' results.Add(Escape(k), 'runs/s', float(v), data_type=data_type) class _DromaeoBenchmark(test.Test): """A base class for Dromaeo benchmarks.""" test = _DromaeoMeasurement def CreatePageSet(self, options): """Makes a PageSet for Dromaeo benchmarks.""" # Subclasses are expected to define a class member called query_param. if not hasattr(self, 'query_param'): raise NotImplementedError('query_param not in Dromaeo benchmark.') url = 'file://index.html?%s&automated' % self.query_param # The docstring of benchmark classes may also be used as a description # when 'run_benchmarks list' is run. 
description = self.__doc__ or 'Dromaeo JavaScript Benchmark' page_set_dict = { 'description': description, 'pages': [{'url': url}], } dromaeo_dir = os.path.join(util.GetChromiumSrcDir(), 'chrome', 'test', 'data', 'dromaeo') return page_set.PageSet.FromDict(page_set_dict, dromaeo_dir) class DromaeoDomCoreAttr(_DromaeoBenchmark): """Dromaeo DOMCore attr JavaScript benchmark.""" tag = 'domcoreattr' query_param = 'dom-attr' class DromaeoDomCoreModify(_DromaeoBenchmark): """Dromaeo DOMCore modify JavaScript benchmark.""" tag = 'domcoremodify' query_param = 'dom-modify' class DromaeoDomCoreQuery(_DromaeoBenchmark): """Dromaeo DOMCore query JavaScript benchmark.""" tag = 'domcorequery' query_param = 'dom-query' class DromaeoDomCoreTraverse(_DromaeoBenchmark): """Dromaeo DOMCore traverse JavaScript benchmark.""" tag = 'domcoretraverse' query_param = 'dom-traverse' class DromaeoJslibAttrJquery(_DromaeoBenchmark): """Dromaeo JSLib attr jquery JavaScript benchmark""" tag = 'jslibattrjquery' query_param = 'jslib-attr-jquery' class DromaeoJslibAttrPrototype(_DromaeoBenchmark): """Dromaeo JSLib attr prototype JavaScript benchmark""" tag = 'jslibattrprototype' query_param = 'jslib-attr-prototype' class DromaeoJslibEventJquery(_DromaeoBenchmark): """Dromaeo JSLib event jquery JavaScript benchmark""" tag = 'jslibeventjquery' query_param = 'jslib-event-jquery' class DromaeoJslibEventPrototype(_DromaeoBenchmark): """Dromaeo JSLib event prototype JavaScript benchmark""" tag = 'jslibeventprototype' query_param = 'jslib-event-prototype' class DromaeoJslibModifyJquery(_DromaeoBenchmark): """Dromaeo JSLib modify jquery JavaScript benchmark""" tag = 'jslibmodifyjquery' query_param = 'jslib-modify-jquery' class DromaeoJslibModifyPrototype(_DromaeoBenchmark): """Dromaeo JSLib modify prototype JavaScript benchmark""" tag = 'jslibmodifyprototype' query_param = 'jslib-modify-prototype' class DromaeoJslibStyleJquery(_DromaeoBenchmark): """Dromaeo JSLib style jquery JavaScript benchmark""" tag = 'jslibstylejquery' query_param = 'jslib-style-jquery' class DromaeoJslibStylePrototype(_DromaeoBenchmark): """Dromaeo JSLib style prototype JavaScript benchmark""" tag = 'jslibstyleprototype' query_param = 'jslib-style-prototype' class DromaeoJslibTraverseJquery(_DromaeoBenchmark): """Dromaeo JSLib traverse jquery JavaScript benchmark""" tag = 'jslibtraversejquery' query_param = 'jslib-traverse-jquery' class DromaeoJslibTraversePrototype(_DromaeoBenchmark): """Dromaeo JSLib traverse prototype JavaScript benchmark""" tag = 'jslibtraverseprototype' query_param = 'jslib-traverse-prototype'
tools/perf/benchmarks/dromaeo.py
4,824
Copyright (c) 2013 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Subclasses are expected to define a class member called query_param. The docstring of benchmark classes may also be used as a description when 'run_benchmarks list' is run.
332
en
0.933413
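The two small helpers in MeasurePage are easy to check in isolation. This standalone sketch reproduces the key escaping and the query-parameter suffix extraction, using the dom-attr URL that DromaeoDomCoreAttr would build and a made-up result key:

# Standalone sketch of the escaping and suffix extraction used above.
def escape(k):
    for c in [' ', '-', '/', '(', ')', '*']:
        k = k.replace(c, '_')
    return k

url = 'file://index.html?dom-attr&automated'
suffix = url[url.index('?') + 1 : url.index('&')]

print(suffix)                              # dom-attr
print(escape('dom-attr (getAttribute)'))   # dom_attr__getAttribute_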
# ------------------------------------------------------------------------------------------------------
#  Copyright (c) Leo Hanisch. All rights reserved.
#  Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------

# pylint: disable=too-many-instance-attributes

from copy import deepcopy
import logging

import numpy as np

from .nest import Nest
from ..util import levy_flight as cuckoo
from .visualizer import Visualizer

LOGGER = logging.getLogger(__name__)


class CuckooProblem:
    def __init__(self, **kwargs):
        """
        Initialize a new cuckoo search problem.
        """
        self.__upper_boundary = kwargs.get('upper_boundary', 4.)
        self.__lower_boundary = kwargs.get('lower_boundary', 0.)
        self.__alpha = kwargs.pop('alpha', 1)
        self.__max_generations = kwargs.pop('max_generations', 10)
        self.__lambda = kwargs.pop('lambda', 1.5)
        self.__p_a = kwargs.pop('p_a', .1)

        self.__function = kwargs['function']
        self.__nests = [
            Nest(lower_boundary=self.__lower_boundary, upper_boundary=self.__upper_boundary,
                 function=self.__function)
            for _ in range(kwargs['nests'])
        ]

        # Initialize visualizer for plotting
        kwargs['iteration_number'] = self.__max_generations
        self.__visualizer = Visualizer(**kwargs)

    def solve(self) -> Nest:
        nest_indices = np.array(range(len(self.__nests)))
        best_nest = deepcopy(min(self.__nests, key=lambda nest: nest.value))

        positions, abandoned = zip(*[(nest.position, nest.abandoned) for nest in self.__nests])
        self.__visualizer.add_data(positions=positions, best_position=best_nest.position, abandoned=abandoned)

        LOGGER.info('Iteration 0 best solution="%s" at position="%s"', best_nest.value, best_nest.position)

        for iteration in range(self.__max_generations):

            # Perform levy flights to get cuckoo's new position
            new_cuckoo_pos = [
                np.clip(cuckoo.levy_flight(nest.position, self.__alpha, self.__lambda),
                        a_min=self.__lower_boundary, a_max=self.__upper_boundary)
                for nest in self.__nests
            ]

            # Randomly select nests to be updated
            np.random.shuffle(nest_indices)

            # Update nests
            for index, pos in zip(nest_indices, new_cuckoo_pos):
                self.__nests[index].update_pos(pos)

            # Abandon nests randomly considering p_a
            for nest in self.__nests:
                if np.random.random_sample() < self.__p_a:
                    nest.abandon()

            # Update best nest
            current_best = min(self.__nests, key=lambda nest: nest.value)
            if current_best.value < best_nest.value:
                best_nest = deepcopy(current_best)
                LOGGER.info('Iteration %i Found new best solution="%s" at position="%s"',
                            iteration+1, best_nest.value, best_nest.position)

            # Add data for plot
            positions, abandoned = zip(*[(nest.position, nest.abandoned) for nest in self.__nests])
            self.__visualizer.add_data(positions=positions, best_position=current_best.position, abandoned=abandoned)

        LOGGER.info('Last best solution="%s" at position="%s"', best_nest.value, best_nest.position)
        return best_nest

    def replay(self):
        """
        Start the problems visualization.
        """
        self.__visualizer.replay()
swarmlib/cuckoosearch/cuckoo_problem.py
3,592
Initialize a new cuckoo search problem. Start the problems visualization. ------------------------------------------------------------------------------------------------------ Copyright (c) Leo Hanisch. All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information. ------------------------------------------------------------------------------------------------------ pylint: disable=too-many-instance-attributes Initialize visualizer for plotting Perform levy flights to get cuckoo's new position Randomly select nests to be updated Update nests Abandon nests randomly considering p_a Update best nest Add data for plot
686
en
0.538671
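A hedged usage sketch of CuckooProblem based only on the constructor arguments visible above. The sphere objective and parameter values are illustrative, and the Visualizer may expect additional keyword arguments that this excerpt does not show:

import numpy as np
from swarmlib.cuckoosearch.cuckoo_problem import CuckooProblem

problem = CuckooProblem(
    function=lambda x: float(np.sum(np.square(x))),   # minimize a sphere function
    nests=15,
    lower_boundary=0.,
    upper_boundary=4.,
    max_generations=20,
    p_a=.25,
)
best_nest = problem.solve()
print(best_nest.position, best_nest.value)
# problem.replay()   # replays the recorded iterations if a display is available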
import os import tensorflow as tf from merge.model import Model def run_model_on_random_input(model): batch_size = 1 height = 100 width = 200 inputs = { 'image': tf.random.uniform(shape=(batch_size, height, width, 3), minval=0, maxval=256, dtype='int32'), 'horz_split_points_probs': tf.random.uniform(shape=(batch_size, height), dtype='float32'), 'vert_split_points_probs': tf.random.uniform(shape=(batch_size, width), dtype='float32'), 'horz_split_points_binary': tf.random.uniform(shape=(batch_size, height), minval=0, maxval=2, dtype='int32'), 'vert_split_points_binary': tf.random.uniform(shape=(batch_size, width), minval=0, maxval=2, dtype='int32') } model(inputs) def load_model(model_file_path, compute_metric): assert os.path.exists(model_file_path) model = Model(compute_metric) run_model_on_random_input(model) model.load_weights(model_file_path) # Metric can't be calculated in graph mode. run_eagerly = True if compute_metric else False model.compile(run_eagerly=run_eagerly) return model def convert_ds_element_to_tuple(element): input_keys = [ 'image', 'horz_split_points_probs', 'vert_split_points_probs', 'horz_split_points_binary', 'vert_split_points_binary' ] return ( {key: element[key] for key in input_keys}, { 'markup_table': element['markup_table'] } )
merge/evaluation.py
1,474
Metric can't be calculated in graph mode.
41
en
0.981942
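A hedged sketch of convert_ds_element_to_tuple on a hand-built element; the tensor shapes and the 'markup_table' stand-in are illustrative, since the real dataset schema is not shown in this excerpt:

import tensorflow as tf
from merge.evaluation import convert_ds_element_to_tuple

# A tiny fake dataset element with the expected keys (shapes are illustrative).
element = {
    'image': tf.zeros((1, 100, 200, 3), dtype=tf.int32),
    'horz_split_points_probs': tf.zeros((1, 100)),
    'vert_split_points_probs': tf.zeros((1, 200)),
    'horz_split_points_binary': tf.zeros((1, 100), dtype=tf.int32),
    'vert_split_points_binary': tf.zeros((1, 200), dtype=tf.int32),
    'markup_table': tf.zeros((1,)),   # stand-in for the ground-truth target
}

inputs, targets = convert_ds_element_to_tuple(element)
print(sorted(inputs.keys()), sorted(targets.keys()))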
""" Mplot demo runner """ import enaml from enaml.qt.qt_application import QtApplication def run_demo(): with enaml.imports(): #from griddata_demo_ui import Main from griddata_demo_model_ui import Main app = QtApplication() view = Main(custom_title='Matplotlib demo', mplot_style='darkish') view.show() # Start the application event loop app.start() run_demo()
tutorial/grid_data_demo_run.py
409
Mplot demo runner from griddata_demo_ui import Main Start the application event loop
85
en
0.801937
import os import tempfile from tests.STDF.STDFRecordTest import STDFRecordTest from STDF import FAR # File Attributes Record # Functuion: # Contains the information necessary to determine # how to decode the STDF datacontained in the file. def test_FAR(): far('<') far('>') def far(end): # STDF v4 page 57 record = FAR(endian = end) # Test serialization # 1. Save FAR STDF record into a file # 2. Read byte by byte and compare with expected value tf = tempfile.NamedTemporaryFile(delete=False) f = open(tf.name, "wb") w_data = record.__repr__() f.write(w_data) f.close f = open(tf.name, "rb") stdfRecTest = STDFRecordTest(f, end) # rec_len, rec_type, rec_sub stdfRecTest.assert_file_record_header(2, 0, 10) # Test REC_CPU, expected value 2 stdfRecTest.assert_ubyte(2); # Test STDF_VER, expected value 4 stdfRecTest.assert_ubyte(4); f.close() # Test de-serialization # 1. Open STDF record from a file # 2. Read record fields and compare with the expected value inst = FAR('V4', end, w_data) # rec_len, rec_type, rec_sub stdfRecTest.assert_instance_record_header(inst , 2, 0, 10) # Test REC_CPU field, position 3, value 2 stdfRecTest.assert_instance_field(inst, 3, 2); # Test STDF_VER field, position 4, value 4 stdfRecTest.assert_instance_field(inst, 4, 4); # Test ATDF output expected_atdf = "FAR:A|4|2|U" assert inst.to_atdf() == expected_atdf # ToDo: Test JSON output os.remove(tf.name)
tests/STDF/test_FAR.py
1,563
File Attributes Record Function: Contains the information necessary to determine how to decode the STDF data contained in the file. STDF v4 page 57 Test serialization 1. Save FAR STDF record into a file 2. Read byte by byte and compare with expected value rec_len, rec_type, rec_sub Test REC_CPU, expected value 2 Test STDF_VER, expected value 4 Test de-serialization 1. Open STDF record from a file 2. Read record fields and compare with the expected value rec_len, rec_type, rec_sub Test REC_CPU field, position 3, value 2 Test STDF_VER field, position 4, value 4 Test ATDF output ToDo: Test JSON output
648
en
0.548751
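For reference, the byte layout the little-endian ('<') case asserts can be reproduced with struct: a 2-byte REC_LEN of 2, REC_TYP 0, REC_SUB 10, then CPU_TYPE 2 and STDF_VER 4:

import struct

# REC_LEN=2 (little-endian, 2 bytes), REC_TYP=0, REC_SUB=10, CPU_TYPE=2, STDF_VER=4
far_bytes = struct.pack('<HBBBB', 2, 0, 10, 2, 4)
print(far_bytes.hex())        # 0200000a0204
assert len(far_bytes) == 6    # 4-byte header plus the 2 payload bytes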
#!/usr/bin/env python3 import sys import re class Num: def __init__(self, value): self.value = value def __add__(self, num): return Num(self.value * num.value) def __mul__(self, num): return Num(self.value + num.value) s = 0 for line in sys.stdin: line = line.replace("+", "$").replace("*", "+").replace("$", "*") line = re.sub(r"(\d)", r"Num(\1)", line) s += eval(line).value print(s)
problems/day-18/part_2.py
441
!/usr/bin/env python3
21
fr
0.448822
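The swap trick above gives addition higher precedence than multiplication: after exchanging '+' and '*' in the text, Python's normal precedence binds the original additions first, and the overloaded Num operators undo the swap arithmetically. A small worked example with an expression of my own choosing; under the "addition binds tighter" rule, 2 * 3 + 4 should evaluate to 2 * (3 + 4) = 14:

import re

class Num:
    def __init__(self, value):
        self.value = value
    def __add__(self, other):   # '+' in the rewritten text is the original '*'
        return Num(self.value * other.value)
    def __mul__(self, other):   # '*' in the rewritten text is the original '+'
        return Num(self.value + other.value)

line = "2 * 3 + 4"
line = line.replace("+", "$").replace("*", "+").replace("$", "*")   # -> "2 + 3 * 4"
line = re.sub(r"(\d)", r"Num(\1)", line)
print(eval(line).value)   # 14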
# -*- coding: utf-8 -*-
import warnings

# warnings.filterwarnings("ignore")  # Suppress warnings, specifying the action to take
warnings.warn("# This is a test warning 111.")
print("Hello One")

warnings.filterwarnings("ignore", category=DeprecationWarning)  # Suppress a specific warning category
warnings.warn("# This is a test warning 222.", DeprecationWarning)  # Suppressed
warnings.warn("# Something else.")  # Not suppressed
print("Hello Two")

warnings.filterwarnings("error")  # Turn warnings into errors
warnings.warn("# This is a test warning 333.", DeprecationWarning)  # Specify the exception to raise
print("Hello Three")

# ### Warnings
# Warnings are not exceptions; they do not stop the program and can be used to signal program state.
# Specific warning categories can be filtered out.
# When issuing a warning, the exception to raise can be specified (the category must be a subclass of Warning).
Python3-Basics/Chapter11_Exception02_Warning.py
869
-*- coding: utf-8 -*- warnings.filterwarnings("ignore") Suppress warnings, specifying the action to take Suppress a specific warning category Suppressed Not suppressed Turn warnings into errors Specify the exception to raise Warnings Warnings are not exceptions; they do not stop the program and can be used to signal program state; specific warning categories can be filtered out; when issuing a warning, the exception to raise can be specified (the warning category must be a subclass of Warning);
192
zh
0.978973
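A companion sketch using warnings.catch_warnings() from the standard library, which scopes filters to a block instead of changing global state the way the filterwarnings calls above do:

import warnings

with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)   # escalate inside the block only
    try:
        warnings.warn("old API", DeprecationWarning)
    except DeprecationWarning as exc:
        print("caught:", exc)

# Outside the block the previous filter state is restored.
warnings.warn("plain warning again", DeprecationWarning)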
#!/usr/bin/env python # -*- coding: utf-8 -*- """Script to Test Deep Learning Model. Contains a pipeline to test a deep learning model. Revision History: 2021-11-20 (ANI717 - Animesh Bala Ani): Baseline Software. Example: $ python3 test.py """ #___Import Modules: import torch from torch.utils.data import DataLoader from tqdm import tqdm import config from model import NvidiaNet from dataset import ANI717Dataset #___Main Method: def main(): # Load Data dataset = ANI717Dataset(config.TEST_CSV, config.IMG_SOURCE, transforms=config.TEST_TRANSFORMS) loader = DataLoader(dataset, batch_size=1, shuffle=False) # Initialize Model with Weights model = NvidiaNet(in_channels=config.IMG_SHAPE[0]).to(config.DEVICE) model.load_state_dict(torch.load(config.MODEL_FILE, map_location=config.DEVICE)["state_dict"]) model.eval() # Initialize total correct number and counter num_correct = 0.0 count = 0 # Loop through dataset with torch.no_grad(): loop = tqdm(loader, position=0, leave=True) for batch_idx, (inputs, z, x) in enumerate(loop): # Enable GPU support is available inputs = inputs.to(config.DEVICE) if config.TRAIN_TYPE == 'z': targets = z.unsqueeze(1).to(torch.float32).to(config.DEVICE) else: targets = x.unsqueeze(1).to(torch.float32).to(config.DEVICE) # Calculate prediction predictions = model(inputs) # Update total correct number and counter num_correct += sum(abs(torch.round(targets/config.ERROR_TOLERENCE) - torch.round(predictions/config.ERROR_TOLERENCE)) <= 1).item() count += predictions.shape[0] # Calculate accuracy loop.set_postfix(accuracy=100*num_correct/count) #___Driver Program: if __name__ == "__main__": main() # # end of file """ANI717"""
deep learning/test/test.py
2,083
Script to Test Deep Learning Model. Contains a pipeline to test a deep learning model. Revision History: 2021-11-20 (ANI717 - Animesh Bala Ani): Baseline Software. Example: $ python3 test.py !/usr/bin/env python -*- coding: utf-8 -*- ___Import Modules: ___Main Method: Load Data Initialize Model with Weights Initialize total correct number and counter Loop through dataset Enable GPU support if available Calculate prediction Update total correct number and counter Calculate accuracy ___Driver Program: end of file
611
en
0.640964
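The correctness test in the loop is a tolerance-bucket comparison: a prediction counts as correct when it lands within one rounded tolerance step of the target. A standalone sketch with made-up tensors and a 0.1 tolerance standing in for config.ERROR_TOLERENCE:

import torch

tolerance = 0.1
targets = torch.tensor([[0.50], [0.50], [0.50]])
predictions = torch.tensor([[0.52], [0.62], [0.90]])

# Same test as in the loop above: within one tolerance bucket counts as a hit.
hits = abs(torch.round(targets / tolerance) - torch.round(predictions / tolerance)) <= 1
num_correct = sum(hits).item()
print(num_correct / targets.shape[0])   # 2 of 3 predictions are within one step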
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/stable/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../../')) print(sys.path) # -- Project information ----------------------------------------------------- project = u'NiaPy' copyright = u'2018, NiaOrg' author = u'Grega Vrbančič, Lucija Brezočnik, Uroš Mlakar, Dušan Fister, Iztok Fister Jr., Klemen Berkovič, Jan Popič' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags release = u'0.0.0.' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. 
# # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'NiaPydoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'NiaPy.tex', u'NiaPy Documentation', u'Grega Vrbančič, Lucija Brezočnik, Uroš Mlakar, Dušan Fister, Iztok Fister Jr.', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'niapy', u'NiaPy Documentation', [author], 1) ] # -- Options for Texinfo output ---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'NiaPy', u'NiaPy Documentation', author, 'NiaPy', 'One line description of project.', 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- autoclass_content = 'both' # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None} # -- Options for todo extension ---------------------------------------------- # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # A boolean that decides whether parentheses are appended to function and method role text (e.g. the content of :func:`input`) to signify that the name is callable. Default is True add_function_parentheses = True # Napolen settings # chekc https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html napoleon_google_docstring = True napoleon_numpy_docstring = False napoleon_include_init_with_doc = True napoleon_include_private_with_doc = True napoleon_include_special_with_doc = True napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False napoleon_use_ivar = True napoleon_use_param = True napoleon_use_rtype = True napoleon_use_keyword = True napoleon_custom_sections = None import matplotlib matplotlib.use('agg')
docs/source/conf.py
6,439
-*- coding: utf-8 -*- Configuration file for the Sphinx documentation builder. This file does only contain a selection of the most common options. For a full list see the documentation: http://www.sphinx-doc.org/en/stable/config -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The short X.Y version The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The master toctree document. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path . The name of the Pygments (syntax highlighting) style to use. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation. html_theme_options = {} Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Custom sidebar templates, must be a dictionary that maps document names to template names. The default sidebars (for documents that don't match any pattern) are defined by theme itself. Builtin themes are using these templates by default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']``. html_sidebars = {} -- Options for HTMLHelp output --------------------------------------------- Output file base name for HTML help builder. -- Options for LaTeX output ------------------------------------------------ The paper size ('letterpaper' or 'a4paper'). 'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. 'preamble': '', Latex figure (float) alignment 'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). -- Options for manual page output ------------------------------------------ One entry per manual page. List of tuples (source start file, name, description, authors, manual section). -- Options for Texinfo output ---------------------------------------------- Grouping the document tree into Texinfo files. 
List of tuples (source start file, target name, title, author, dir menu entry, description, category) -- Extension configuration ------------------------------------------------- -- Options for intersphinx extension --------------------------------------- Example configuration for intersphinx: refer to the Python standard library. -- Options for todo extension ---------------------------------------------- If true, `todo` and `todoList` produce output, else they produce nothing. A boolean that decides whether parentheses are appended to function and method role text (e.g. the content of :func:`input`) to signify that the name is callable. Default is True Napolen settings chekc https://sphinxcontrib-napoleon.readthedocs.io/en/latest/sphinxcontrib.napoleon.html
4,305
en
0.612787
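Since the configuration above enables sphinx.ext.napoleon with napoleon_google_docstring = True, autodoc renders Google-style docstrings. The function below is a made-up illustration of that style, not part of the project:

def differential_evolution(population_size, generations):
    """Run a toy optimization loop.

    Args:
        population_size (int): Number of candidate solutions.
        generations (int): Number of iterations to run.

    Returns:
        float: Best objective value found.
    """
    return 0.0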
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 5/15/20 4:49 PM # @File : grover.py # qubit number=4 # total number=8 import cirq import cirq.google as cg from typing import Optional import sys from math import log2 import numpy as np #thatsNoCode def make_circuit(n: int, input_qubit): c = cirq.Circuit() # circuit begin c.append(cirq.H.on(input_qubit[0])) # number=1 c.append(cirq.H.on(input_qubit[1])) # number=2 c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5 c.append(cirq.H.on(input_qubit[2])) # number=3 c.append(cirq.H.on(input_qubit[3])) # number=4 c.append(cirq.X.on(input_qubit[1])) # number=6 c.append(cirq.X.on(input_qubit[1])) # number=7 # circuit end return c def bitstring(bits): return ''.join(str(int(b)) for b in bits) if __name__ == '__main__': qubit_count = 4 input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)] circuit = make_circuit(qubit_count,input_qubits) circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap') circuit_sample_count =2000 info = cirq.final_state_vector(circuit) qubits = round(log2(len(info))) frequencies = { np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3) for i in range(2 ** qubits) } writefile = open("../data/startCirq_Class18.csv","w+") print(format(frequencies),file=writefile) print("results end", file=writefile) print(circuit.__len__(), file=writefile) print(circuit,file=writefile) writefile.close()
data/cirq_new/cirq_program/startCirq_Class18.py
1,584
!/usr/bin/env python -*- coding: utf-8 -*- @Time : 5/15/20 4:49 PM @File : grover.py qubit number=4 total number=8thatsNoCode circuit begin number=1 number=2 number=5 number=3 number=4 number=6 number=7 circuit end
220
en
0.336016
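A smaller, self-contained sketch of the probability bookkeeping at the bottom of the script: a single Hadamard should give a 50/50 split over '0' and '1'. The cirq.google optimization step is skipped here, since that module is version-dependent and has moved out of core cirq releases:

from math import log2
import numpy as np
import cirq

q = cirq.LineQubit(0)
circuit = cirq.Circuit([cirq.H(q)])

state = cirq.final_state_vector(circuit)
n_qubits = round(log2(len(state)))
probs = {
    np.binary_repr(i, n_qubits): round(float(abs(state[i]) ** 2), 3)
    for i in range(2 ** n_qubits)
}
print(probs)   # {'0': 0.5, '1': 0.5}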
# simple example demonstrating how to control a Tello using your keyboard. # For a more fully featured example see manual-control-pygame.py # # Use W, A, S, D for moving, E, Q for rotating and R, F for going up and down. # When starting the script the Tello will takeoff, pressing ESC makes it land # and the script exit. # 简单的演示如何用键盘控制Tello # 欲使用全手动控制请查看 manual-control-pygame.py # # W, A, S, D 移动, E, Q 转向,R、F上升与下降. # 开始运行程序时Tello会自动起飞,按ESC键降落 # 并且程序会退出 from djitellopy import Tello import cv2, math, time tello = Tello() tello.connect() tello.streamon() frame_read = tello.get_frame_read() height, width, _ = frame_read.frame.shape # tello.takeoff() nSnap = 0 # w = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) w = width h= height folder = "." name = "snapshot" fileName = "%s/%s_%d_%d_" %(folder, name, w, h) while True: # In reality you want to display frames in a seperate thread. Otherwise # they will freeze while the drone moves. # 在实际开发里请在另一个线程中显示摄像头画面,否则画面会在无人机移动时静止 img = frame_read.frame cv2.imshow("drone", img) # height, width, _ = frame_read.frame.shape # video = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, (width, height)) key = cv2.waitKey(1) & 0xff if key == 27: # ESC break elif key == ord('w'): tello.move_forward(30) elif key == ord('s'): tello.move_back(30) elif key == ord('a'): tello.move_left(30) elif key == ord('d'): tello.move_right(30) elif key == ord('e'): tello.rotate_clockwise(30) elif key == ord('q'): tello.rotate_counter_clockwise(30) elif key == ord('r'): tello.send_command_with_return('downvision 0') frame_read = tello.get_frame_read() elif key == ord('f'): tello.send_command_with_return('downvision 1') frame_read = tello.get_frame_read() elif key == ord(' '): print("Saving image ", nSnap) cv2.imwrite("%s%d-jpg"%(fileName, nSnap), img) nSnap += 1 # tello.land()
examples/manual-control-opencv.py
2,264
simple example demonstrating how to control a Tello using your keyboard. For a more fully featured example see manual-control-pygame.py Use W, A, S, D for moving, E, Q for rotating and R, F for going up and down. When starting the script the Tello will takeoff, pressing ESC makes it land and the script exit. 简单的演示如何用键盘控制Tello 欲使用全手动控制请查看 manual-control-pygame.py W, A, S, D 移动, E, Q 转向,R、F上升与下降. 开始运行程序时Tello会自动起飞,按ESC键降落 并且程序会退出 tello.takeoff() w = cap.get(cv2.CAP_PROP_FRAME_WIDTH) h = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) In reality you want to display frames in a separate thread. Otherwise they will freeze while the drone moves. 在实际开发里请在另一个线程中显示摄像头画面,否则画面会在无人机移动时静止 height, width, _ = frame_read.frame.shape video = cv2.VideoWriter('video.avi', cv2.VideoWriter_fourcc(*'XVID'), 30, (width, height)) ESC tello.land()
836
en
0.517609
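The comment inside the loop warns that frames freeze while blocking move commands run. A hedged sketch of that advice, sending commands from a background thread while the main thread keeps displaying frames; it uses only djitellopy calls already shown above, and actual flight behaviour depends on the drone and platform:

import threading
import cv2
from djitellopy import Tello

tello = Tello()
tello.connect()
tello.streamon()
frame_read = tello.get_frame_read()

def fly():
    # Blocking movement commands run here so the display loop below never stalls.
    tello.takeoff()
    tello.move_forward(30)
    tello.rotate_clockwise(90)
    tello.land()

threading.Thread(target=fly, daemon=True).start()

while True:
    cv2.imshow("drone", frame_read.frame)
    if cv2.waitKey(1) & 0xff == 27:   # ESC closes the window
        break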
# -*- coding: utf-8 -*- """Base exchange class""" # ----------------------------------------------------------------------------- __version__ = '1.17.322' # ----------------------------------------------------------------------------- from ccxt.base.errors import ExchangeError from ccxt.base.errors import NetworkError from ccxt.base.errors import NotSupported from ccxt.base.errors import AuthenticationError from ccxt.base.errors import DDoSProtection from ccxt.base.errors import RequestTimeout from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.errors import InvalidAddress # ----------------------------------------------------------------------------- from ccxt.base.decimal_to_precision import decimal_to_precision from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND # ----------------------------------------------------------------------------- __all__ = [ 'Exchange', ] # ----------------------------------------------------------------------------- # Python 2 & 3 import logging import base64 import calendar import collections import datetime from email.utils import parsedate import functools import gzip import hashlib import hmac import io import json import math from numbers import Number import re from requests import Session from requests.utils import default_user_agent from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException # import socket from ssl import SSLError # import sys import time import uuid import zlib from decimal import Decimal # ----------------------------------------------------------------------------- try: basestring # basestring was removed in python 3.0 except NameError: basestring = str # ----------------------------------------------------------------------------- try: import urllib.parse as _urlencode # Python 3 except ImportError: import urllib as _urlencode # Python 2 # ----------------------------------------------------------------------------- # web3/0x imports try: # from web3.auto import w3 from web3 import Web3, HTTPProvider from web3.utils.encoding import hex_encode_abi_type except ImportError: Web3 = HTTPProvider = None # web3/0x not supported in Python 2 # ----------------------------------------------------------------------------- class Exchange(object): """Base exchange class""" id = None version = None certified = False # rate limiter settings enableRateLimit = False rateLimit = 2000 # milliseconds = seconds * 1000 timeout = 10000 # milliseconds = seconds * 1000 asyncio_loop = None aiohttp_proxy = None aiohttp_trust_env = False session = None # Session () by default logger = None # logging.getLogger(__name__) by default userAgent = None userAgents = { 'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36', 'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36', } verbose = False markets = None symbols = None fees = { 'trading': { 'fee_loaded': False, 'percentage': True, # subclasses should rarely have to redefine this }, 'funding': { 'fee_loaded': False, 'withdraw': {}, 'deposit': {}, }, } ids = None tickers = None api = None parseJsonResponse = True proxy = '' origin = '*' # CORS origin proxies = None hostname = None # in case of inaccessibility of the "main" domain apiKey = '' secret = '' password = '' uid = '' privateKey = '' # a "0x"-prefixed hexstring private key for a wallet walletAddress = '' # the wallet address "0x"-prefixed hexstring 
twofa = False marketsById = None markets_by_id = None currencies_by_id = None precision = None limits = None exceptions = None httpExceptions = { '422': ExchangeError, '418': DDoSProtection, '429': DDoSProtection, '404': ExchangeNotAvailable, '409': ExchangeNotAvailable, '500': ExchangeNotAvailable, '501': ExchangeNotAvailable, '502': ExchangeNotAvailable, '520': ExchangeNotAvailable, '521': ExchangeNotAvailable, '522': ExchangeNotAvailable, '525': ExchangeNotAvailable, '400': ExchangeNotAvailable, '403': ExchangeNotAvailable, '405': ExchangeNotAvailable, '503': ExchangeNotAvailable, '530': ExchangeNotAvailable, '408': RequestTimeout, '504': RequestTimeout, '401': AuthenticationError, '511': AuthenticationError, } headers = None balance = None orderbooks = None orders = None trades = None transactions = None currencies = None options = None # Python does not allow to define properties in run-time with setattr requiredCredentials = { 'apiKey': True, 'secret': True, 'uid': False, 'login': False, 'password': False, 'twofa': False, # 2-factor authentication (one-time password key) 'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet 'walletAddress': False, # the wallet address "0x"-prefixed hexstring } # API method metainfo has = { 'publicAPI': True, 'privateAPI': True, 'CORS': False, 'cancelOrder': True, 'cancelOrders': False, 'createDepositAddress': False, 'createOrder': True, 'createMarketOrder': True, 'createLimitOrder': True, 'deposit': False, 'editOrder': 'emulated', 'fetchBalance': True, 'fetchClosedOrders': False, 'fetchCurrencies': False, 'fetchDepositAddress': False, 'fetchDeposits': False, 'fetchFundingFees': False, 'fetchL2OrderBook': True, 'fetchMarkets': True, 'fetchMyTrades': False, 'fetchOHLCV': 'emulated', 'fetchOpenOrders': False, 'fetchOrder': False, 'fetchOrderBook': True, 'fetchOrderBooks': False, 'fetchOrders': False, 'fetchTicker': True, 'fetchTickers': False, 'fetchTrades': True, 'fetchTradingFees': False, 'fetchTradingLimits': False, 'fetchTransactions': False, 'fetchWithdrawals': False, 'withdraw': False, } precisionMode = DECIMAL_PLACES minFundingAddressLength = 1 # used in check_address substituteCommonCurrencyCodes = True lastRestRequestTimestamp = 0 lastRestPollTimestamp = 0 restRequestQueue = None restPollerLoopIsRunning = False rateLimitTokens = 16 rateLimitMaxTokens = 16 rateLimitUpdateTime = 0 enableLastHttpResponse = True enableLastJsonResponse = True enableLastResponseHeaders = True last_http_response = None last_json_response = None last_response_headers = None web3 = None commonCurrencies = { 'XBT': 'BTC', 'BCC': 'BCH', 'DRK': 'DASH', } def __init__(self, config={}): self.precision = dict() if self.precision is None else self.precision self.limits = dict() if self.limits is None else self.limits self.exceptions = dict() if self.exceptions is None else self.exceptions self.headers = dict() if self.headers is None else self.headers self.balance = dict() if self.balance is None else self.balance self.orderbooks = dict() if self.orderbooks is None else self.orderbooks self.orders = dict() if self.orders is None else self.orders self.trades = dict() if self.trades is None else self.trades self.transactions = dict() if self.transactions is None else self.transactions self.currencies = dict() if self.currencies is None else self.currencies self.options = dict() if self.options is None else self.options # Python does not allow to define properties in run-time with setattr self.decimalToPrecision = self.decimal_to_precision = 
decimal_to_precision # version = '.'.join(map(str, sys.version_info[:3])) # self.userAgent = { # 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version # } self.userAgent = default_user_agent() settings = self.deep_extend(self.describe(), config) for key in settings: if hasattr(self, key) and isinstance(getattr(self, key), dict): setattr(self, key, self.deep_extend(getattr(self, key), settings[key])) else: setattr(self, key, settings[key]) if self.api: self.define_rest_api(self.api, 'request') if self.markets: self.set_markets(self.markets) # convert all properties from underscore notation foo_bar to camelcase notation fooBar for name in dir(self): if name[0] != '_'and name[-1] != '_' and '_' in name: parts = name.split('_') camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:]) setattr(self, camelcase, getattr(self, name)) self.tokenBucket = self.extend({ 'refillRate': 1.0 / self.rateLimit, 'delay': 1.0, 'capacity': 1.0, 'defaultCost': 1.0, }, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {}) self.session = self.session if self.session else Session() self.logger = self.logger if self.logger else logging.getLogger(__name__) if Web3 and not self.web3: # self.web3 = w3 if w3 else Web3(HTTPProvider()) self.web3 = Web3(HTTPProvider()) def __del__(self): if self.session: self.session.close() def describe(self): return {} def define_rest_api(self, api, method_name, options={}): delimiters = re.compile('[^a-zA-Z0-9]') for api_type, methods in api.items(): for http_method, urls in methods.items(): for url in urls: url = url.strip() split_path = delimiters.split(url) uppercase_method = http_method.upper() lowercase_method = http_method.lower() camelcase_method = lowercase_method.capitalize() camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path]) lowercase_path = [x.strip().lower() for x in split_path] underscore_suffix = '_'.join([k for k in lowercase_path if len(k)]) camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix) underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower() if 'suffixes' in options: if 'camelcase' in options['suffixes']: camelcase += options['suffixes']['camelcase'] if 'underscore' in options['suffixes']: underscore += options['suffixes']['underscore'] partial = functools.partial(getattr(self, method_name), url, api_type, uppercase_method) setattr(self, camelcase, partial) setattr(self, underscore, partial) def raise_error(self, exception_type, url=None, method=None, error=None, details=None): if error: error = str(error) output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None]) raise exception_type(output) def throttle(self): now = float(self.milliseconds()) elapsed = now - self.lastRestRequestTimestamp if elapsed < self.rateLimit: delay = self.rateLimit - elapsed time.sleep(delay / 1000.0) def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None): """A better wrapper over request for deferred signing""" if self.enableRateLimit: self.throttle() self.lastRestRequestTimestamp = self.milliseconds() request = self.sign(path, api, method, params, headers, body) return self.fetch(request['url'], request['method'], request['headers'], request['body']) def request(self, path, api='public', method='GET', params={}, headers=None, body=None): return self.fetch2(path, api, method, params, headers, body) @staticmethod def gzip_deflate(response, text): encoding = 
response.info().get('Content-Encoding') if encoding in ('gzip', 'x-gzip', 'deflate'): if encoding == 'deflate': return zlib.decompress(text, -zlib.MAX_WBITS) else: return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read() return text def find_broadly_matched_key(self, broad, string): """A helper method for matching error strings exactly vs broadly""" keys = list(broad.keys()) for i in range(0, len(keys)): key = keys[i] if string.find(key) >= 0: return key return None def handle_errors(self, code, reason, url, method, headers, body): pass def prepare_request_headers(self, headers=None): headers = headers or {} headers.update(self.headers) if self.userAgent: if type(self.userAgent) is str: headers.update({'User-Agent': self.userAgent}) elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent): headers.update(self.userAgent) if self.proxy: headers.update({'Origin': self.origin}) headers.update({'Accept-Encoding': 'gzip, deflate'}) return headers def fetch(self, url, method='GET', headers=None, body=None): """Perform a HTTP request and return decoded JSON data""" request_headers = self.prepare_request_headers(headers) url = self.proxy + url if self.verbose: print("\nRequest:", method, url, request_headers, body) self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body) if body: body = body.encode() self.session.cookies.clear() response = None http_response = None try: response = self.session.request( method, url, data=body, headers=request_headers, timeout=int(self.timeout / 1000), proxies=self.proxies ) http_response = response.text if self.enableLastHttpResponse: self.last_http_response = http_response headers = response.headers if self.enableLastResponseHeaders: self.last_response_headers = headers if self.verbose: print("\nResponse:", method, url, str(response.status_code), str(headers), http_response) self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, headers, http_response) response.raise_for_status() except Timeout as e: self.raise_error(RequestTimeout, method, url, e) except TooManyRedirects as e: self.raise_error(ExchangeError, url, method, e) except SSLError as e: self.raise_error(ExchangeError, url, method, e) except HTTPError as e: self.handle_errors(response.status_code, response.reason, url, method, headers, http_response) self.handle_rest_errors(e, response.status_code, http_response, url, method) self.raise_error(ExchangeError, url, method, e, http_response) except RequestException as e: # base exception class error_string = str(e) if ('ECONNRESET' in error_string) or ('Connection aborted.' 
in error_string): self.raise_error(NetworkError, url, method, e) else: self.raise_error(ExchangeError, url, method, e) self.handle_errors(response.status_code, response.reason, url, method, None, http_response) return self.handle_rest_response(http_response, url, method, headers, body) def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'): error = None string_code = str(http_status_code) if string_code in self.httpExceptions: error = self.httpExceptions[string_code] if error == ExchangeNotAvailable: if re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE): error = DDoSProtection if error: self.raise_error(error, url, method, exception if exception else http_status_code, response) def handle_rest_response(self, response, url, method='GET', headers=None, body=None): try: if self.parseJsonResponse: json_response = json.loads(response) if len(response) > 1 else None if self.enableLastJsonResponse: self.last_json_response = json_response return json_response else: return response except ValueError as e: # ValueError == JsonDecodeError ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE) exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE) if ddos_protection: self.raise_error(DDoSProtection, method, url, None, response) if exchange_not_available: message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect' self.raise_error(ExchangeNotAvailable, method, url, None, message) self.raise_error(ExchangeError, method, url, e, response) @staticmethod def safe_float(dictionary, key, default_value=None): value = default_value try: if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key: value = float(dictionary[key]) else: value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value except ValueError as e: value = default_value return value @staticmethod def safe_string(dictionary, key, default_value=None): return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value @staticmethod def safe_integer(dictionary, key, default_value=None): if key is None or (key not in dictionary): return default_value value = dictionary[key] if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()): return int(value) return default_value @staticmethod def safe_value(dictionary, key, default_value=None): return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value # we're not using safe_floats with a list argument as we're trying to save some cycles here # we're not using safe_float_3 either because those cases are too rare to deserve their own optimization @staticmethod def safe_float_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value) @staticmethod def safe_string_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value) @staticmethod def safe_integer_2(dictionary, key1, key2, default_value=None): return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value) @staticmethod def safe_value_2(dictionary, key1, key2, default_value=None): return 
Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value) @staticmethod def safe_either(method, dictionary, key1, key2, default_value=None): """A helper-wrapper for the safe_value_2() family.""" value = method(dictionary, key1) return value if value is not None else method(dictionary, key2, default_value) @staticmethod def truncate(num, precision=0): """Deprecated, use decimal_to_precision instead""" if precision > 0: decimal_precision = math.pow(10, precision) return math.trunc(num * decimal_precision) / decimal_precision return int(Exchange.truncate_to_string(num, precision)) @staticmethod def truncate_to_string(num, precision=0): """Deprecated, todo: remove references from subclasses""" if precision > 0: parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.') decimal_digits = parts[1][:precision].rstrip('0') decimal_digits = decimal_digits if len(decimal_digits) else '0' return parts[0] + '.' + decimal_digits return ('%d' % num) @staticmethod def uuid(): return str(uuid.uuid4()) @staticmethod def capitalize(string): # first character only, rest characters unchanged # the native pythonic .capitalize() method lowercases all other characters # which is an unwanted behaviour, therefore we use this custom implementation # check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize()) if len(string) > 1: return "%s%s" % (string[0].upper(), string[1:]) return string.upper() @staticmethod def keysort(dictionary): return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0])) @staticmethod def extend(*args): if args is not None: result = None if type(args[0]) is collections.OrderedDict: result = collections.OrderedDict() else: result = {} for arg in args: result.update(arg) return result return {} @staticmethod def deep_extend(*args): result = None for arg in args: if isinstance(arg, dict): if not isinstance(result, dict): result = {} for key in arg: result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key]) else: result = arg return result @staticmethod def filter_by(array, key, value=None): if value: grouped = Exchange.group_by(array, key) if value in grouped: return grouped[value] return [] return array @staticmethod def filterBy(self, array, key, value=None): return Exchange.filter_by(array, key, value) @staticmethod def group_by(array, key): result = {} array = Exchange.to_array(array) array = [entry for entry in array if (key in entry) and (entry[key] is not None)] for entry in array: if entry[key] not in result: result[entry[key]] = [] result[entry[key]].append(entry) return result @staticmethod def groupBy(array, key): return Exchange.group_by(array, key) @staticmethod def index_by(array, key): result = {} if type(array) is dict: array = Exchange.keysort(array).values() for element in array: if (key in element) and (element[key] is not None): k = element[key] result[k] = element return result @staticmethod def sort_by(array, key, descending=False): return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending) @staticmethod def array_concat(a, b): return a + b @staticmethod def in_array(needle, haystack): return needle in haystack @staticmethod def is_empty(object): return not object @staticmethod def extract_params(string): return re.findall(r'{([\w-]+)}', string) @staticmethod def implode_params(string, params): for key in params: string = string.replace('{' + key + '}', str(params[key])) return string @staticmethod def url(path, params={}): result = 
Exchange.implode_params(path, params) query = Exchange.omit(params, Exchange.extract_params(path)) if query: result += '?' + _urlencode.urlencode(query) return result @staticmethod def urlencode(params={}): if (type(params) is dict) or isinstance(params, collections.OrderedDict): return _urlencode.urlencode(params) return params @staticmethod def rawencode(params={}): return _urlencode.unquote(Exchange.urlencode(params)) @staticmethod def encode_uri_component(uri): return _urlencode.quote(uri, safe="~()*!.'") @staticmethod def omit(d, *args): result = d.copy() for arg in args: if type(arg) is list: for key in arg: if key in result: del result[key] else: if arg in result: del result[arg] return result @staticmethod def unique(array): return list(set(array)) @staticmethod def pluck(array, key): return [ element[key] for element in array if (key in element) and (element[key] is not None) ] @staticmethod def sum(*args): return sum([arg for arg in args if isinstance(arg, (float, int))]) @staticmethod def ordered(array): return collections.OrderedDict(array) @staticmethod def aggregate(bidasks): ordered = Exchange.ordered({}) for [price, volume] in bidasks: if volume > 0: ordered[price] = (ordered[price] if price in ordered else 0) + volume result = [] items = list(ordered.items()) for price, volume in items: result.append([price, volume]) return result @staticmethod def sec(): return Exchange.seconds() @staticmethod def msec(): return Exchange.milliseconds() @staticmethod def usec(): return Exchange.microseconds() @staticmethod def seconds(): return int(time.time()) @staticmethod def milliseconds(): return int(time.time() * 1000) @staticmethod def microseconds(): return int(time.time() * 1000000) @staticmethod def iso8601(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, int): return None if int(timestamp) < 0: return None try: utc = datetime.datetime.utcfromtimestamp(timestamp // 1000) return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z' except (TypeError, OverflowError, OSError): return None @staticmethod def dmy(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y') @staticmethod def ymd(timestamp, infix='-'): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d') @staticmethod def ymdhms(timestamp, infix=' '): utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000))) return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S') @staticmethod def parse_date(timestamp=None): if timestamp is None: return timestamp if not isinstance(timestamp, str): return None if 'GMT' in timestamp: try: string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") return calendar.timegm(dt.utctimetuple()) * 1000 except (TypeError, OverflowError, OSError): return None else: return Exchange.parse8601(timestamp) @staticmethod def parse8601(timestamp=None): if timestamp is None: return timestamp yyyy = '([0-9]{4})-?' mm = '([0-9]{2})-?' dd = '([0-9]{2})(?:T|[\\s])?' h = '([0-9]{2}):?' m = '([0-9]{2}):?' s = '([0-9]{2})' ms = '(\\.[0-9]{1,3})?' tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?' 
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz try: match = re.search(regex, timestamp, re.IGNORECASE) if match is None: return None yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups() ms = ms or '.000' msint = int(ms[1:]) sign = sign or '' sign = int(sign + '1') hours = int(hours or 0) * sign minutes = int(minutes or 0) * sign offset = datetime.timedelta(hours=hours, minutes=minutes) string = yyyy + mm + dd + h + m + s + ms + 'Z' dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ") dt = dt + offset return calendar.timegm(dt.utctimetuple()) * 1000 + msint except (TypeError, OverflowError, OSError, ValueError): return None @staticmethod def hash(request, algorithm='md5', digest='hex'): h = hashlib.new(algorithm, request) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'): h = hmac.new(secret, request, algorithm) if digest == 'hex': return h.hexdigest() elif digest == 'base64': return base64.b64encode(h.digest()) return h.digest() @staticmethod def binary_concat(*args): result = bytes() for arg in args: result = result + arg return result @staticmethod def binary_to_string(s): return s.decode('ascii') @staticmethod def base64urlencode(s): return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '') @staticmethod def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'): header = Exchange.encode(Exchange.json({ 'alg': alg, 'typ': 'JWT', })) encodedHeader = Exchange.base64urlencode(header) encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request))) token = encodedHeader + '.' + encodedData hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary') signature = Exchange.base64urlencode(hmac) return token + '.' 
+ signature @staticmethod def unjson(input): return json.loads(input) @staticmethod def json(data, params=None): return json.dumps(data, separators=(',', ':')) @staticmethod def parse_if_json_encoded_object(input): return json.loads(input) if Exchange.is_json_encoded_object(input) else input @staticmethod def is_json_encoded_object(input): return (isinstance(input, basestring) and (len(input) >= 2) and ((input[0] == '{') or (input[0] == '['))) @staticmethod def encode(string): return string.encode() @staticmethod def decode(string): return string.decode() @staticmethod def to_array(value): return list(value.values()) if type(value) is dict else value def nonce(self): return Exchange.seconds() def check_required_credentials(self): keys = list(self.requiredCredentials.keys()) for key in keys: if self.requiredCredentials[key] and not getattr(self, key): self.raise_error(AuthenticationError, details='requires `' + key + '`') def check_address(self, address): """Checks an address is not the same character repeated or an empty sequence""" if address is None: self.raise_error(InvalidAddress, details='address is None') if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address: self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"') return address def account(self): return { 'free': 0.0, 'used': 0.0, 'total': 0.0, } def common_currency_code(self, currency): if not self.substituteCommonCurrencyCodes: return currency return self.safe_string(self.commonCurrencies, currency, currency) def currency_id(self, commonCode): if self.currencies: if commonCode in self.currencies: return self.currencies[commonCode]['id'] currencyIds = {v: k for k, v in self.commonCurrencies.items()} return self.safe_string(currencyIds, commonCode, commonCode) def fromWei(self, amount, unit='ether'): if Web3 is None: self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org") if amount is None: return amount return float(Web3.fromWei(int(amount), unit)) def toWei(self, amount, unit='ether'): if Web3 is None: self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org") if amount is None: return amount return str(Web3.toWei(int(amount), unit)) def precision_from_string(self, string): parts = re.sub(r'0+$', '', string).split('.') return len(parts[1]) if len(parts) > 1 else 0 def cost_to_precision(self, symbol, cost): return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def price_to_precision(self, symbol, price): return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def amount_to_precision(self, symbol, amount): return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode) def fee_to_precision(self, symbol, fee): return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode) def currency_to_precision(self, currency, fee): return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode) def set_markets(self, markets, currencies=None): values = list(markets.values()) if type(markets) is dict else markets for i in range(0, len(values)): values[i] = self.extend( self.fees['trading'], {'precision': self.precision, 'limits': self.limits}, 
values[i] ) self.markets = self.index_by(values, 'symbol') self.markets_by_id = self.index_by(values, 'id') self.marketsById = self.markets_by_id self.symbols = sorted(list(self.markets.keys())) self.ids = sorted(list(self.markets_by_id.keys())) if currencies: self.currencies = self.deep_extend(currencies, self.currencies) else: base_currencies = [{ 'id': market['baseId'] if 'baseId' in market else market['base'], 'numericId': market['baseNumericId'] if 'baseNumericId' in market else None, 'code': market['base'], 'precision': ( market['precision']['base'] if 'base' in market['precision'] else ( market['precision']['amount'] if 'amount' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'base' in market] quote_currencies = [{ 'id': market['quoteId'] if 'quoteId' in market else market['quote'], 'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None, 'code': market['quote'], 'precision': ( market['precision']['quote'] if 'quote' in market['precision'] else ( market['precision']['price'] if 'price' in market['precision'] else None ) ) if 'precision' in market else 8, } for market in values if 'quote' in market] currencies = self.sort_by(base_currencies + quote_currencies, 'code') self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies) self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id') return self.markets def load_markets(self, reload=False): if not reload: if self.markets: if not self.markets_by_id: return self.set_markets(self.markets) return self.markets markets = self.fetch_markets() currencies = None if self.has['fetchCurrencies']: currencies = self.fetch_currencies() return self.set_markets(markets, currencies) def populate_fees(self): if not (hasattr(self, 'markets') or hasattr(self, 'currencies')): return for currency, data in self.currencies.items(): # try load withdrawal fees from currencies if 'fee' in data and data['fee'] is not None: self.fees['funding']['withdraw'][currency] = data['fee'] self.fees['funding']['fee_loaded'] = True # find a way to populate trading fees from markets def load_fees(self): self.load_markets() self.populate_fees() if not (self.has['fetchTradingFees'] or self.has['fetchFundingFees']): return self.fees fetched_fees = self.fetch_fees() if fetched_fees['funding']: self.fees['funding']['fee_loaded'] = True if fetched_fees['trading']: self.fees['trading']['fee_loaded'] = True self.fees = self.deep_extend(self.fees, fetched_fees) return self.fees def fetch_markets(self): # markets are returned as a list # currencies are returned as a dict # this is for historical reasons # and may be changed for consistency later return self.to_array(self.markets) def fetch_currencies(self, params={}): # markets are returned as a list # currencies are returned as a dict # this is for historical reasons # and may be changed for consistency later return self.currencies def fetch_fees(self): trading = {} funding = {} try: trading = self.fetch_trading_fees() except AuthenticationError: pass except AttributeError: pass try: funding = self.fetch_funding_fees() except AuthenticationError: pass except AttributeError: pass return { 'trading': trading, 'funding': funding, } def create_order(self, symbol, type, side, amount, price=None, params={}): self.raise_error(NotSupported, details='create_order() not implemented yet') def cancel_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='cancel_order() not implemented yet') def 
fetch_bids_asks(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now') def fetch_tickers(self, symbols=None, params={}): self.raise_error(NotSupported, details='API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now') def fetch_order_status(self, id, market=None): order = self.fetch_order(id) return order['status'] def purge_cached_orders(self, before): orders = self.to_array(self.orders) orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)] self.orders = self.index_by(orders, 'id') return self.orders def fetch_order(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order() is not implemented yet') def fetch_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_orders() is not implemented yet') def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_open_orders() is not implemented yet') def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_closed_orders() is not implemented yet') def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_my_trades() is not implemented yet') def fetch_order_trades(self, id, symbol=None, params={}): self.raise_error(NotSupported, details='fetch_order_trades() is not implemented yet') def fetch_transactions(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_transactions() is not implemented yet') def fetch_deposits(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_deposits() is not implemented yet') def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}): self.raise_error(NotSupported, details='fetch_withdrawals() is not implemented yet') def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None): return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None): ohlcvs = self.to_array(ohlcvs) num_ohlcvs = len(ohlcvs) result = [] i = 0 while i < num_ohlcvs: if limit and (len(result) >= limit): break ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit) i = i + 1 if since and (ohlcv[0] < since): continue result.append(ohlcv) return self.sort_by(result, 0) def parse_bid_ask(self, bidask, price_key=0, amount_key=0): return [float(bidask[price_key]), float(bidask[amount_key])] def parse_bids_asks(self, bidasks, price_key=0, amount_key=1): result = [] if len(bidasks): if type(bidasks[0]) is list: for bidask in bidasks: if bidask[price_key] and bidask[amount_key]: result.append(self.parse_bid_ask(bidask, price_key, amount_key)) elif type(bidasks[0]) is dict: for bidask in bidasks: if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]): result.append(self.parse_bid_ask(bidask, price_key, amount_key)) else: self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0])) return result def fetch_l2_order_book(self, symbol, limit=None, params={}): orderbook = self.fetch_order_book(symbol, limit, params) return self.extend(orderbook, { 'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, 
True), 'asks': self.sort_by(self.aggregate(orderbook['asks']), 0), }) def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1): return { 'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True), 'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0), 'timestamp': timestamp, 'datetime': self.iso8601(timestamp) if timestamp is not None else None, 'nonce': None, } def parse_balance(self, balance): currencies = self.omit(balance, 'info').keys() for account in ['free', 'used', 'total']: balance[account] = {} for currency in currencies: balance[account][currency] = balance[currency][account] return balance def fetch_partial_balance(self, part, params={}): balance = self.fetch_balance(params) return balance[part] def fetch_free_balance(self, params={}): return self.fetch_partial_balance('free', params) def fetch_used_balance(self, params={}): return self.fetch_partial_balance('used', params) def fetch_total_balance(self, params={}): return self.fetch_partial_balance('total', params) def load_trading_limits(self, symbols=None, reload=False, params={}): if self.has['fetchTradingLimits']: if reload or not('limitsLoaded' in list(self.options.keys())): response = self.fetch_trading_limits(symbols) for i in range(0, len(symbols)): symbol = symbols[i] self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol]) self.options['limitsLoaded'] = self.milliseconds() return self.markets def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): if not self.has['fetchTrades']: self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet') self.load_markets() trades = self.fetch_trades(symbol, since, limit, params) return self.build_ohlcv(trades, timeframe, since, limit) def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}): return self.fetch_ohlcv(symbol, timeframe, since, limit, params) def convert_trading_view_to_ohlcv(self, ohlcvs): result = [] for i in range(0, len(ohlcvs['t'])): result.append([ ohlcvs['t'][i] * 1000, ohlcvs['o'][i], ohlcvs['h'][i], ohlcvs['l'][i], ohlcvs['c'][i], ohlcvs['v'][i], ]) return result def convert_ohlcv_to_trading_view(self, ohlcvs): result = { 't': [], 'o': [], 'h': [], 'l': [], 'c': [], 'v': [], } for i in range(0, len(ohlcvs)): result['t'].append(int(ohlcvs[i][0] / 1000)) result['o'].append(ohlcvs[i][1]) result['h'].append(ohlcvs[i][2]) result['l'].append(ohlcvs[i][3]) result['c'].append(ohlcvs[i][4]) result['v'].append(ohlcvs[i][5]) return result def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None): ms = self.parse_timeframe(timeframe) * 1000 ohlcvs = [] (high, low, close, volume) = (2, 3, 4, 5) num_trades = len(trades) oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit) for i in range(0, oldest): trade = trades[i] if (since is not None) and (trade['timestamp'] < since): continue opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M) j = len(ohlcvs) if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms: # moved to a new timeframe -> create a new candle from opening trade ohlcvs.append([ opening_time, trade['price'], trade['price'], trade['price'], trade['price'], trade['amount'], ]) else: # still processing the same timeframe -> 
update opening trade ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price']) ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price']) ohlcvs[j - 1][close] = trade['price'] ohlcvs[j - 1][volume] += trade['amount'] return ohlcvs def parse_timeframe(self, timeframe): amount = int(timeframe[0:-1]) unit = timeframe[-1] if 'y' in unit: scale = 60 * 60 * 24 * 365 elif 'M' in unit: scale = 60 * 60 * 24 * 30 elif 'w' in unit: scale = 60 * 60 * 24 * 7 elif 'd' in unit: scale = 60 * 60 * 24 elif 'h' in unit: scale = 60 * 60 else: scale = 60 # 1m by default return amount * scale def parse_trades(self, trades, market=None, since=None, limit=None): array = self.to_array(trades) array = [self.parse_trade(trade, market) for trade in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def parse_transactions(self, transactions, currency=None, since=None, limit=None): array = self.to_array(transactions) array = [self.parse_transaction(transaction, currency) for transaction in array] array = self.sort_by(array, 'timestamp') code = currency['code'] if currency else None return self.filter_by_currency_since_limit(array, code, since, limit) def parse_orders(self, orders, market=None, since=None, limit=None): array = self.to_array(orders) array = [self.parse_order(order, market) for order in array] array = self.sort_by(array, 'timestamp') symbol = market['symbol'] if market else None return self.filter_by_symbol_since_limit(array, symbol, since, limit) def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None): array = self.to_array(array) if value: array = [entry for entry in array if entry[field] == value] if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None): return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit) def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None): return self.filter_by_value_since_limit(array, 'currency', code, since, limit) def filter_by_since_limit(self, array, since=None, limit=None): array = self.to_array(array) if since: array = [entry for entry in array if entry['timestamp'] >= since] if limit: array = array[0:limit] return array def filter_by_symbol(self, array, symbol=None): array = self.to_array(array) if symbol: return [entry for entry in array if entry['symbol'] == symbol] return array def filter_by_array(self, objects, key, values=None, indexed=True): objects = self.to_array(objects) # return all of them if no values were passed in if values is None: return self.index_by(objects, key) if indexed else objects result = [] for i in range(0, len(objects)): value = objects[i][key] if key in objects[i] else None if value in values: result.append(objects[i]) return self.index_by(result, key) if indexed else result def currency(self, code): if not self.currencies: self.raise_error(ExchangeError, details='Currencies not loaded') if isinstance(code, basestring) and (code in self.currencies): return self.currencies[code] self.raise_error(ExchangeError, details='Does not have currency code ' + str(code)) def find_market(self, string): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(string, basestring): if string in self.markets_by_id: return self.markets_by_id[string] if string in 
self.markets: return self.markets[string] return string def find_symbol(self, string, market=None): if market is None: market = self.find_market(string) if isinstance(market, dict): return market['symbol'] return string def market(self, symbol): if not self.markets: self.raise_error(ExchangeError, details='Markets not loaded') if isinstance(symbol, basestring) and (symbol in self.markets): return self.markets[symbol] self.raise_error(ExchangeError, details='No market symbol ' + str(symbol)) def market_ids(self, symbols): return [self.market_id(symbol) for symbol in symbols] def market_id(self, symbol): market = self.market(symbol) return market['id'] if type(market) is dict else symbol def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}): market = self.markets[symbol] rate = market[takerOrMaker] cost = float(self.cost_to_precision(symbol, amount * price)) return { 'rate': rate, 'type': takerOrMaker, 'currency': market['quote'], 'cost': float(self.fee_to_precision(symbol, rate * cost)), } def edit_limit_buy_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'buy', *args) def edit_limit_sell_order(self, id, symbol, *args): return self.edit_limit_order(id, symbol, 'sell', *args) def edit_limit_order(self, id, symbol, *args): return self.edit_order(id, symbol, 'limit', *args) def edit_order(self, id, symbol, *args): if not self.enableRateLimit: self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true') self.cancel_order(id, symbol) return self.create_order(symbol, *args) def create_limit_order(self, symbol, *args): return self.create_order(symbol, 'limit', *args) def create_market_order(self, symbol, *args): return self.create_order(symbol, 'market', *args) def create_limit_buy_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'buy', *args) def create_limit_sell_order(self, symbol, *args): return self.create_order(symbol, 'limit', 'sell', *args) def create_market_buy_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'buy', amount, None, params) def create_market_sell_order(self, symbol, amount, params={}): return self.create_order(symbol, 'market', 'sell', amount, None, params) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes') # ------------------------------------------------------------------------- # web3 / 0x methods def decryptAccountFromJSON(self, value, password): return self.decryptAccount(json.loads(value) if isinstance(value, basestring) else value, password) def decryptAccount(self, key, password): return self.web3.eth.accounts.decrypt(key, password) def decryptAccountFromPrivateKey(self, privateKey): return self.web3.eth.accounts.privateKeyToAccount(privateKey) def soliditySha3(self, array): values = self.solidityValues(array) types = self.solidityTypes(values) return self.web3.soliditySha3(types, values).hex() def soliditySha256(self, values): types = self.solidityTypes(values) solidity_values = self.solidityValues(values) encoded_values = [hex_encode_abi_type(abi_type, value)[2:] for abi_type, value in zip(types, solidity_values)] hex_string = '0x' + ''.join(encoded_values) return '0x' + self.hash(self.encode(self.web3.toText(hex_string)), 'sha256') def solidityTypes(self, array): return ['address' if self.web3.isAddress(value) else 'uint256' for value in array] def solidityValues(self, array): return 
[self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else int(value) for value in array] def getZeroExOrderHash2(self, order): return self.soliditySha3([ order['exchangeContractAddress'], # address order['maker'], # address order['taker'], # address order['makerTokenAddress'], # address order['takerTokenAddress'], # address order['feeRecipient'], # address order['makerTokenAmount'], # uint256 order['takerTokenAmount'], # uint256 order['makerFee'], # uint256 order['takerFee'], # uint256 order['expirationUnixTimestampSec'], # uint256 order['salt'], # uint256 ]) def getZeroExOrderHash(self, order): unpacked = [ self.web3.toChecksumAddress(order['exchangeContractAddress']), # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['maker']), # { value: order.maker, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['taker']), # { value: order.taker, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['makerTokenAddress']), # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['takerTokenAddress']), # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address }, self.web3.toChecksumAddress(order['feeRecipient']), # { value: order.feeRecipient, type: types_1.SolidityTypes.Address }, int(order['makerTokenAmount']), # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, }, int(order['takerTokenAmount']), # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, }, int(order['makerFee']), # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, }, int(order['takerFee']), # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, }, int(order['expirationUnixTimestampSec']), # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, }, int(order['salt']), # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 }, ] types = [ 'address', # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.maker, type: types_1.SolidityTypes.Address }, 'address', # { value: order.taker, type: types_1.SolidityTypes.Address }, 'address', # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address }, 'address', # { value: order.feeRecipient, type: types_1.SolidityTypes.Address }, 'uint256', # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, }, 'uint256', # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 }, ] return self.web3.soliditySha3(types, unpacked).hex() def signZeroExOrder(self, order): orderHash = self.getZeroExOrderHash(order) signature = self.signMessage(orderHash[-64:], self.privateKey) return self.extend(order, { 'orderHash': orderHash, 'ecSignature': signature, # todo fix v if needed }) def hashMessage(self, message): message_bytes = bytes.fromhex(message) return 
self.web3.sha3(b"\x19Ethereum Signed Message:\n" + str(len(message_bytes)).encode() + message_bytes).hex() def signHash(self, hash, privateKey): signature = self.web3.eth.account.signHash(hash[-64:], private_key=privateKey[-64:]) return { 'v': signature.v, # integer 'r': self.web3.toHex(signature.r), # '0x'-prefixed hex string 's': self.web3.toHex(signature.s), # '0x'-prefixed hex string } def signMessage(self, message, privateKey): # # The following comment is related to MetaMask, we use the upper type of signature prefix: # # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f', # '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', { # prefixType: 'ETH_SIGN', # shouldAddPrefixBeforeCallingEthSign: true # }).then ((e, r) => console.log (e,r)) # # { ↓ # v: 28, # r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2", # s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf" # } # # -------------------------------------------------------------------- # # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f', # '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', { # prefixType: 'NONE', # shouldAddPrefixBeforeCallingEthSign: true # }).then ((e, r) => console.log (e,r)) # # { ↓ # v: 27, # r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6", # s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394" # } # message_hash = self.hashMessage(message) signature = self.signHash(message_hash[-64:], privateKey[-64:]) return signature
python/ccxt/base/exchange.py
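The Exchange class above exposes its parsing and safe-access helpers as static methods. A minimal usage sketch follows; it assumes the module is importable as ccxt.base.exchange (matching the path above) and uses made-up values.

from ccxt.base.exchange import Exchange

ticker = {'last': '101.5', 'close': None, 'timestamp': 1546300800000}

# safe_* accessors fall back to the default when a key is missing or None
print(Exchange.safe_float(ticker, 'last'))             # 101.5
print(Exchange.safe_float(ticker, 'close', 0.0))       # 0.0
print(Exchange.safe_float_2(ticker, 'close', 'last'))  # 101.5, falls back to the second key

# parse8601/iso8601 convert between ISO 8601 strings and millisecond timestamps
ms = Exchange.parse8601('2019-01-01T00:00:00.000Z')
print(ms)                    # 1546300800000
print(Exchange.iso8601(ms))  # '2019-01-01T00:00:00.000Z'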
from pydub import AudioSegment
from pydub.playback import play
import os
import utils


class audiofile:
    def __init__(self, file):
        """ Init audio stream """
        self.file = file

    def play(self):
        """ Play entire file """
        utils.displayInfoMessage('Playing Audio')
        pathparts = self.file.rsplit(".", 1)
        fileformat = pathparts[1]
        song = AudioSegment.from_file(self.file, format=fileformat)
        play(song)
        utils.displayInfoMessage('')
        utils.displayErrorMessage('')

    def length(self):
        pathparts = self.file.rsplit(".", 1)
        fileformat = pathparts[1]
        song = AudioSegment.from_file(self.file, format=fileformat)
        return song.duration_seconds
AudioFile.py
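A short, hypothetical usage sketch of the audiofile class above; the file path is made up, and the utils module with displayInfoMessage/displayErrorMessage must exist in the surrounding project.

song = audiofile("track.mp3")  # hypothetical file path
print(song.length())           # duration in seconds via pydub's AudioSegment
song.play()                    # blocks until playback finishes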
"""Tasmota MQTT.""" import asyncio import logging from typing import Union import attr from .const import COMMAND_BACKLOG DEBOUNCE_TIMEOUT = 1 _LOGGER = logging.getLogger(__name__) class Timer: """Simple timer.""" def __init__(self, timeout, callback): self._timeout = timeout self._callback = callback self._task = asyncio.ensure_future(self._job()) async def _job(self): await asyncio.sleep(self._timeout) self._callback() def cancel(self): """Cancel the timer.""" self._task.cancel() PublishPayloadType = Union[str, bytes, int, float, None] @attr.s(slots=True, frozen=True) class Message: """MQTT Message.""" topic: str = attr.ib() payload: PublishPayloadType = attr.ib() qos: int = attr.ib() retain: bool = attr.ib() class TasmotaMQTTClient: """Helper class to sue an external MQTT client.""" def __init__(self, publish, subscribe, unsubscribe): """Initialize.""" self._pending_messages = {} self._publish = publish self._subscribe = subscribe self._unsubscribe = unsubscribe def publish(self, *args, **kwds): """Publish a message.""" return self._publish(*args, **kwds) def publish_debounced(self, topic, payload, qos=None, retain=None): """Publish a message, with debounce.""" msg = Message(topic, payload, qos, retain) def publish_callback(): _LOGGER.debug("publish_debounced: publishing %s", msg) self._pending_messages.pop(msg) self.publish(msg.topic, msg.payload, qos=msg.qos, retain=msg.retain) if msg in self._pending_messages: timer = self._pending_messages.pop(msg) timer.cancel() timer = Timer(DEBOUNCE_TIMEOUT, publish_callback) self._pending_messages[msg] = timer async def subscribe(self, sub_state, topics): """Subscribe to topics.""" return await self._subscribe(sub_state, topics) async def unsubscribe(self, sub_state): """Unsubscribe from topics.""" return await self._unsubscribe(sub_state) def send_commands(mqtt_client, command_topic, commands): """Send a sequence of commands.""" backlog_topic = command_topic + COMMAND_BACKLOG backlog = ";".join(["NoDelay;%s %s" % command for command in commands]) mqtt_client.publish(backlog_topic, backlog)
hatasmota/mqtt.py
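An illustrative sketch of wiring TasmotaMQTTClient above to stand-in callables and sending a command Backlog; the topic string is made up, and COMMAND_BACKLOG's exact value comes from hatasmota.const.

async def _subscribe(sub_state, topics):   # stand-in, normally backed by a real MQTT client
    return sub_state

async def _unsubscribe(sub_state):
    return sub_state

def _publish(topic, payload, qos=0, retain=False):
    print("PUBLISH %s: %s" % (topic, payload))

client = TasmotaMQTTClient(_publish, _subscribe, _unsubscribe)

# Each command is a (name, value) pair; send_commands joins them into one Backlog payload,
# e.g. "NoDelay;Power1 1;NoDelay;Delay 10;NoDelay;Power1 0"
send_commands(client, "cmnd/tasmota_ABC123/", [("Power1", 1), ("Delay", 10), ("Power1", 0)])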
from gym_multigrid.multigrid import *


class CollectGameEnv(MultiGridEnv):
    """
    Environment in which the agents have to collect the balls
    """

    def __init__(
        self,
        size=10,
        width=None,
        height=None,
        num_balls=[],
        agents_index=[],
        balls_index=[],
        balls_reward=[],
        zero_sum=False,
        view_size=7
    ):
        self.num_balls = num_balls
        self.balls_index = balls_index
        self.balls_reward = balls_reward
        self.zero_sum = zero_sum

        self.world = World

        agents = []
        for i in agents_index:
            agents.append(Agent(self.world, i, view_size=view_size))

        super().__init__(
            grid_size=size,
            width=width,
            height=height,
            max_steps=10000,
            # Set this to True for maximum speed
            see_through_walls=False,
            agents=agents,
            agent_view_size=view_size
        )

    def _gen_grid(self, width, height):
        self.grid = Grid(width, height)

        # Generate the surrounding walls
        self.grid.horz_wall(self.world, 0, 0)
        self.grid.horz_wall(self.world, 0, height - 1)
        self.grid.vert_wall(self.world, 0, 0)
        self.grid.vert_wall(self.world, width - 1, 0)

        for number, index, reward in zip(self.num_balls, self.balls_index, self.balls_reward):
            for i in range(number):
                self.place_obj(Ball(self.world, index, reward))

        # Randomize the player start position and orientation
        for a in self.agents:
            self.place_agent(a)

    def _reward(self, i, rewards, reward=1):
        """
        Compute the reward to be given upon success
        """
        for j, a in enumerate(self.agents):
            if a.index == i or a.index == 0:
                rewards[j] += reward
            if self.zero_sum:
                if a.index != i or a.index == 0:
                    rewards[j] -= reward

    def _handle_pickup(self, i, rewards, fwd_pos, fwd_cell):
        if fwd_cell:
            if fwd_cell.can_pickup():
                if fwd_cell.index in [0, self.agents[i].index]:
                    fwd_cell.cur_pos = np.array([-1, -1])
                    self.grid.set(*fwd_pos, None)
                    self._reward(i, rewards, fwd_cell.reward)

    def _handle_drop(self, i, rewards, fwd_pos, fwd_cell):
        pass

    def step(self, actions):
        obs, rewards, done, info = MultiGridEnv.step(self, actions)
        return obs, rewards, done, info


class CollectGame4HEnv10x10N2(CollectGameEnv):
    def __init__(self):
        super().__init__(
            size=10,
            num_balls=[5],
            agents_index=[1, 2, 3],
            balls_index=[0],
            balls_reward=[1],
            zero_sum=True
        )
gym_multigrid/envs/collect_game.py
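A hypothetical rollout loop for the environment above; it assumes the gym-multigrid package providing MultiGridEnv, Agent, Ball, Grid and World is installed, and that the env exposes the usual gym Discrete action_space.

env = CollectGame4HEnv10x10N2()
obs = env.reset()

for _ in range(100):
    # one integer action index per agent (three agents were configured above)
    actions = [env.action_space.sample() for _ in env.agents]
    obs, rewards, done, info = env.step(actions)
    if done:
        break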
# ble_command_load_group.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:57 PM

import sys
import asyncio
import logging
import argparse
from typing import Optional
from binascii import hexlify

from bleak import BleakClient

from tutorial_modules import GOPRO_BASE_UUID, connect_ble

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()


async def main(identifier: Optional[str]) -> None:
    # Synchronization event to wait until notification response is received
    event = asyncio.Event()

    # UUIDs to write to and receive responses from
    COMMAND_REQ_UUID = GOPRO_BASE_UUID.format("0072")
    COMMAND_RSP_UUID = GOPRO_BASE_UUID.format("0073")
    response_uuid = COMMAND_RSP_UUID

    client: BleakClient

    def notification_handler(handle: int, data: bytes) -> None:
        logger.info(f'Received response at {handle=}: {hexlify(data, ":")!r}')

        # If this is the correct handle and the status is success, the command was a success
        if client.services.characteristics[handle].uuid == response_uuid and data[2] == 0x00:
            logger.info("Command sent successfully")
        # Anything else is unexpected. This shouldn't happen
        else:
            logger.error("Unexpected response")

        # Notify the writer
        event.set()

    client = await connect_ble(notification_handler, identifier)

    # Write to command request BleUUID to load the video preset group
    logger.info("Loading the video preset group...")
    event.clear()
    await client.write_gatt_char(COMMAND_REQ_UUID, bytearray([0x04, 0x3E, 0x02, 0x03, 0xE8]))
    await event.wait()  # Wait to receive the notification response

    await client.disconnect()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Connect to a GoPro camera, then change the Preset Group to Video."
    )
    parser.add_argument(
        "-i",
        "--identifier",
        type=str,
        help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. If not used, first discovered GoPro will be connected to",
        default=None,
    )
    args = parser.parse_args()

    try:
        asyncio.run(main(args.identifier))
    except:
        sys.exit(-1)
    else:
        sys.exit(0)
demos/python/tutorial/tutorial_modules/tutorial_2_send_ble_commands/ble_command_load_group.py
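The bytearray written in the script above is a length-prefixed command. A hedged reading of its bytes, following the Open GoPro tutorial series this file belongs to:

request = bytearray([
    0x04,        # packet length: 4 bytes follow
    0x3E,        # command id: Load Preset Group
    0x02,        # parameter length: 2 bytes
    0x03, 0xE8,  # parameter value 0x03E8 = 1000, the video preset group
])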
import inspect
from collections import OrderedDict
from json.decoder import JSONDecodeError
from typing import Optional, Tuple, Union
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

from recipe_scrapers.settings import settings

from ._schemaorg import SchemaOrg

# some sites close their content for 'bots', so user-agent must be supplied
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7"
}


class AbstractScraper:
    def __init__(
        self,
        url,
        proxies: Optional[str] = None,  # allows us to specify optional proxy server
        timeout: Optional[
            Union[float, Tuple, None]
        ] = None,  # allows us to specify optional timeout for request
    ):
        if settings.TEST_MODE:  # when testing, we load a file
            page_data = url.read()
            url = "https://test.example.com/"
        else:
            page_data = requests.get(
                url, headers=HEADERS, proxies=proxies, timeout=timeout
            ).content

        self.soup = BeautifulSoup(page_data, "html.parser")
        self.url = url

        # Attempt to read Schema.org data. Gracefully fail if it raises an exception parsing the JSON.
        # The scraper subclass can use BeautifulSoup to extract the information.
        try:
            self.schema = SchemaOrg(page_data)
        except JSONDecodeError:
            pass

        # attach the plugins as instructed in settings.PLUGINS
        for name, func in inspect.getmembers(self, inspect.ismethod):
            current_method = getattr(self.__class__, name)
            for plugin in reversed(settings.PLUGINS):
                if plugin.should_run(self.host(), name):
                    current_method = plugin.run(current_method)
            setattr(self.__class__, name, current_method)

    @classmethod
    def host(cls) -> str:
        """ get the host of the url, so we can use the correct scraper """
        raise NotImplementedError("This should be implemented.")

    def canonical_url(self):
        canonical_link = self.soup.find("link", {"rel": "canonical", "href": True})
        if canonical_link:
            return urljoin(self.url, canonical_link["href"])
        return self.url

    def title(self):
        raise NotImplementedError("This should be implemented.")

    def total_time(self):
        """ total time it takes to prepare the recipe in minutes """
        raise NotImplementedError("This should be implemented.")

    def yields(self):
        """ The number of servings or items in the recipe """
        raise NotImplementedError("This should be implemented.")

    def image(self):
        raise NotImplementedError("This should be implemented.")

    def nutrients(self):
        raise NotImplementedError("This should be implemented.")

    def language(self):
        """
        Human language the recipe is written in.
        May be overridden by individual scrapers.
        """
        candidate_languages = OrderedDict()
        html = self.soup.find("html", {"lang": True})
        candidate_languages[html.get("lang")] = True

        # Deprecated: check for a meta http-equiv header
        # See: https://www.w3.org/International/questions/qa-http-and-lang
        meta_language = (
            self.soup.find(
                "meta",
                {
                    "http-equiv": lambda x: x and x.lower() == "content-language",
                    "content": True,
                },
            )
            if settings.META_HTTP_EQUIV
            else None
        )
        if meta_language:
            language = meta_language.get("content").split(",", 1)[0]
            if language:
                candidate_languages[language] = True

        # If other langs exist, remove 'en' commonly generated by HTML editors
        if len(candidate_languages) > 1:
            candidate_languages.pop("en", None)

        # Return the first candidate language
        return candidate_languages.popitem(last=False)[0]

    def ingredients(self):
        raise NotImplementedError("This should be implemented.")

    def instructions(self):
        raise NotImplementedError("This should be implemented.")

    def ratings(self):
        raise NotImplementedError("This should be implemented.")

    def author(self):
        raise NotImplementedError("This should be implemented.")

    def reviews(self):
        raise NotImplementedError("This should be implemented.")

    def links(self):
        invalid_href = {"#", ""}
        links_html = self.soup.findAll("a", href=True)
        return [link.attrs for link in links_html if link["href"] not in invalid_href]

    def site_name(self):
        meta = self.soup.find("meta", property="og:site_name")
        return meta.get("content") if meta else None
recipe_scrapers/_abstract.py
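An illustrative subclass sketch for AbstractScraper above; everything except the inherited API is made up, including the host name and the CSS selectors.

class ExampleRecipeScraper(AbstractScraper):
    @classmethod
    def host(cls) -> str:
        return "recipes.example.com"  # hypothetical site this scraper would handle

    def title(self):
        return self.soup.find("h1").get_text(strip=True)

    def ingredients(self):
        return [li.get_text(strip=True) for li in self.soup.select("li.ingredient")]

    def instructions(self):
        return "\n".join(
            step.get_text(strip=True) for step in self.soup.select("div.instructions p")
        )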
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= # pylint: disable=unused-import,g-bad-import-order """## Activation Functions The activation ops provide different types of nonlinearities for use in neural networks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`, `softplus`, and `softsign`), continuous but not everywhere differentiable functions (`relu`, `relu6`, and `relu_x`), and random regularization (`dropout`). All activation ops apply componentwise, and produce a tensor of the same shape as the input tensor. @@relu @@relu6 @@elu @@softplus @@softsign @@dropout @@bias_add @@sigmoid @@tanh ## Convolution The convolution ops sweep a 2-D filter over a batch of images, applying the filter to each window of each image of the appropriate size. The different ops trade off between generic vs. specific filters: * `conv2d`: Arbitrary filters that can mix channels together. * `depthwise_conv2d`: Filters that operate on each channel independently. * `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter. Note that although these ops are called "convolution", they are strictly speaking "cross-correlation" since the filter is combined with an input window without reversing the filter. For details, see [the properties of cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties). The filter is applied to image patches of the same size as the filter and strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the filter to every other image patch in each dimension, etc. Ignoring channels for the moment, and assume that the 4-D `input` has shape `[batch, in_height, in_width, ...]` and the 4-D `filter` has shape `[filter_height, filter_width, ...]`, then the spatial semantics of the convolution ops are as follows: first, according to the padding scheme chosen as `'SAME'` or `'VALID'`, the output size and the padding pixels are computed. For the `'SAME'` padding, the output height and width are computed as: out_height = ceil(float(in_height) / float(strides[1])) out_width = ceil(float(in_width) / float(strides[2])) and the padding on the top and left are computed as: pad_along_height = ((out_height - 1) * strides[1] + filter_height - in_height) pad_along_width = ((out_width - 1) * strides[2] + filter_width - in_width) pad_top = pad_along_height / 2 pad_left = pad_along_width / 2 Note that the division by 2 means that there might be cases when the padding on both sides (top vs bottom, right vs left) are off by one. In this case, the bottom and right sides always get the one additional padded pixel. For example, when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the bottom. 
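# Illustrative worked example (not part of the file) of the 'SAME' padding formulas above,
# for in_height = 7, filter_height = 3, strides[1] = 2:
import math

in_height, filter_height, stride = 7, 3, 2
out_height = math.ceil(float(in_height) / float(stride))                  # ceil(7 / 2) = 4
pad_along_height = (out_height - 1) * stride + filter_height - in_height  # 3 * 2 + 3 - 7 = 2
pad_top = pad_along_height // 2                                           # 1
pad_bottom = pad_along_height - pad_top                                   # 1; bottom gets the extra pixel when odd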
Note that this is different from existing libraries such as cuDNN and Caffe, which explicitly specify the number of padded pixels and always pad the same number of pixels on both sides. For the `'VALID`' padding, the output height and width are computed as: out_height = ceil(float(in_height - filter_height + 1) / float(strides[1])) out_width = ceil(float(in_width - filter_width + 1) / float(strides[2])) and the padding values are always zero. The output is then computed as output[b, i, j, :] = sum_{di, dj} input[b, strides[1] * i + di - pad_top, strides[2] * j + dj - pad_left, ...] * filter[di, dj, ...] where any value outside the original input image region are considered zero ( i.e. we pad zero values around the border of the image). Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new vectors. For `depthwise_conv_2d`, each scalar component `input[b, i, j, k]` is multiplied by a vector `filter[di, dj, k]`, and all the vectors are concatenated. @@conv2d @@depthwise_conv2d @@separable_conv2d @@atrous_conv2d @@conv2d_transpose @@conv3d ## Pooling The pooling ops sweep a rectangular window over the input tensor, computing a reduction operation for each window (average, max, or max with argmax). Each pooling op uses rectangular windows of size `ksize` separated by offset `strides`. For example, if `strides` is all ones every window is used, if `strides` is all twos every other window is used in each dimension, etc. In detail, the output is output[i] = reduce(value[strides * i:strides * i + ksize]) where the indices also take into consideration the padding values. Please refer to the `Convolution` section for details about the padding calculation. @@avg_pool @@max_pool @@max_pool_with_argmax @@avg_pool3d @@max_pool3d ## Morphological filtering Morphological operators are non-linear filters used in image processing. [Greyscale morphological dilation] (https://en.wikipedia.org/wiki/Dilation_(morphology)) is the max-sum counterpart of standard sum-product convolution: output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filter[dy, dx, c] The `filter` is usually called structuring function. Max-pooling is a special case of greyscale morphological dilation when the filter assumes all-zero values (a.k.a. flat structuring function). [Greyscale morphological erosion] (https://en.wikipedia.org/wiki/Erosion_(morphology)) is the min-sum counterpart of standard sum-product convolution: output[b, y, x, c] = min_{dy, dx} input[b, strides[1] * y - rates[1] * dy, strides[2] * x - rates[2] * dx, c] - filter[dy, dx, c] Dilation and erosion are dual to each other. The dilation of the input signal `f` by the structuring signal `g` is equal to the negation of the erosion of `-f` by the reflected `g`, and vice versa. Striding and padding is carried out in exactly the same way as in standard convolution. Please refer to the `Convolution` section for details. @@dilation2d @@erosion2d ## Normalization Normalization is useful to prevent neurons from saturating when inputs may have varying scale, and to aid generalization. @@l2_normalize @@local_response_normalization @@sufficient_statistics @@normalize_moments @@moments ## Losses The loss ops measure error between two tensors, or between a tensor and zero. These can be used for measuring accuracy of a network in a regression task or for regularization purposes (weight decay). 
@@l2_loss ## Classification TensorFlow provides several operations that help you perform classification. @@sigmoid_cross_entropy_with_logits @@softmax @@log_softmax @@softmax_cross_entropy_with_logits @@sparse_softmax_cross_entropy_with_logits @@weighted_cross_entropy_with_logits ## Embeddings TensorFlow provides library support for looking up values in embedding tensors. @@embedding_lookup @@embedding_lookup_sparse ## Recurrent Neural Networks TensorFlow provides a number of methods for constructing Recurrent Neural Networks. Most accept an `RNNCell`-subclassed object (see the documentation for `tf.nn.rnn_cell`). @@dynamic_rnn @@rnn @@state_saving_rnn @@bidirectional_rnn ## Connectionist Temporal Classification (CTC) @@ctc_loss @@ctc_greedy_decoder @@ctc_beam_search_decoder ## Evaluation The evaluation ops are useful for measuring the performance of a network. Since they are nondifferentiable, they are typically used at evaluation time. @@top_k @@in_top_k ## Candidate Sampling Do you want to train a multiclass or multilabel model with thousands or millions of output classes (for example, a language model with a large vocabulary)? Training with a full Softmax is slow in this case, since all of the classes are evaluated for every training example. Candidate Sampling training algorithms can speed up your step times by only considering a small randomly-chosen subset of contrastive classes (called candidates) for each batch of training examples. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) ### Sampled Loss Functions TensorFlow provides the following sampled loss functions for faster training. @@nce_loss @@sampled_softmax_loss ### Candidate Samplers TensorFlow provides the following samplers for randomly sampling candidate classes when using one of the sampled loss functions above. @@uniform_candidate_sampler @@log_uniform_candidate_sampler @@learned_unigram_candidate_sampler @@fixed_unigram_candidate_sampler ### Miscellaneous candidate sampling utilities @@compute_accidental_hits """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import candidate_sampling_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad from tensorflow.python.ops import nn_ops from tensorflow.python.ops import numerics from tensorflow.python.ops import random_ops from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import seq2seq from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.math_ops import sigmoid from tensorflow.python.ops.math_ops import tanh from tensorflow.python.util.all_util import make_all # Bring more nn-associated functionality into this package.
# go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.ctc_ops import * from tensorflow.python.ops.nn_ops import * from tensorflow.python.ops.candidate_sampling_ops import * from tensorflow.python.ops.embedding_ops import * from tensorflow.python.ops.rnn import * # pylint: enable=wildcard-import def sigmoid_cross_entropy_with_logits(logits, targets, name=None): """Computes sigmoid cross entropy given `logits`. Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = targets`. The logistic loss is z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) For x < 0, to avoid overflow in exp(-x), we reformulate the above x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) = - x * z + log(1 + exp(x)) Hence, to ensure stability and avoid overflow, the implementation uses this equivalent formulation max(x, 0) - x * z + log(1 + exp(-abs(x))) `logits` and `targets` must have the same type and shape. Args: logits: A `Tensor` of type `float32` or `float64`. targets: A `Tensor` of the same type and shape as `logits`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise logistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. """ with ops.op_scope([logits, targets], name, "logistic_loss") as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError( "logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) # The logistic loss formula from above is # x - x * z + log(1 + exp(-x)) # For x < 0, a more numerically stable formula is # -x * z + log(1 + exp(x)) # Note that these two expressions can be combined into the following: # max(x, 0) - x * z + log(1 + exp(-abs(x))) # To allow computing gradients at zero, we define custom versions of max and # abs functions. zeros = array_ops.zeros_like(logits, dtype=logits.dtype) cond = (logits >= zeros) relu_logits = math_ops.select(cond, logits, zeros) neg_abs_logits = math_ops.select(cond, -logits, logits) return math_ops.add(relu_logits - logits * targets, math_ops.log(1 + math_ops.exp(neg_abs_logits)), name=name) def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None): """Computes a weighted cross entropy. This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`, allows one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. The usual cross-entropy cost is defined as: targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits)) The argument `pos_weight` is used as a multiplier for the positive targets: targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits)) For brevity, let `x = logits`, `z = targets`, `q = pos_weight`. 
The loss is: qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))) = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x)) = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow, the implementation uses (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) `logits` and `targets` must have the same type and shape. Args: logits: A `Tensor` of type `float32` or `float64`. targets: A `Tensor` of the same type and shape as `logits`. pos_weight: A coefficient to use on the positive examples. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise weighted logistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. """ with ops.op_scope([logits, targets], name, "logistic_loss") as name: logits = ops.convert_to_tensor(logits, name="logits") targets = ops.convert_to_tensor(targets, name="targets") try: targets.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError( "logits and targets must have the same shape (%s vs %s)" % (logits.get_shape(), targets.get_shape())) # The logistic loss formula from above is # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) # For x < 0, a more numerically stable formula is # (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x # To avoid branching, we use the combined version # (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) log_weight = 1 + (pos_weight - 1) * targets return math_ops.add( (1 - targets) * logits, log_weight * (math_ops.log(1 + math_ops.exp(-math_ops.abs(logits))) + nn_ops.relu(-logits)), name=name) def relu_layer(x, weights, biases, name=None): """Computes Relu(x * weights + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "nn_relu_layer" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units. """ with ops.op_scope([x, weights, biases], name, "relu_layer") as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases) return nn_ops.relu(xw_plus_b, name=name) def l2_normalize(x, dim, epsilon=1e-12, name=None): """Normalizes along dimension `dim` using an L2 norm. For a 1-D tensor with `dim = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `dim`. Args: x: A `Tensor`. dim: Dimension along which to normalize. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`. """ with ops.op_scope([x], name, "l2_normalize") as name: x = ops.convert_to_tensor(x, name="x") square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True) x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon)) return math_ops.mul(x, x_inv_norm, name=name) def zero_fraction(value, name=None): """Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`. This is useful in summaries to measure and report sparsity. For example, z = tf.Relu(...) summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z)) Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in `value`, with type `float32`. """ with ops.op_scope([value], name, "zero_fraction"): value = ops.convert_to_tensor(value, name="value") zero = constant_op.constant(0, dtype=value.dtype, name="zero") return math_ops.reduce_mean(math_ops.cast(math_ops.equal(value, zero), dtypes.float32)) def depthwise_conv2d(input, filter, strides, padding, name=None): """Depthwise 2-D convolution. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]` containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. The output has `in_channels * channel_multiplier` channels. In detail, output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. strides: 1-D of size 4. The stride of the sliding window for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution) name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, in_channels * channel_multiplier].` """ with ops.op_scope([input, filter], name, "depthwise") as name: input = ops.convert_to_tensor(input, name="tensor_in") filter = ops.convert_to_tensor(filter, name="filter_in") # A shape is required to statically compute the number of separable filters. if filter.get_shape().ndims is not None: assert len(filter.get_shape()) == 4 in_channels = filter.get_shape()[2] # Sanity checks, if shape information is available for the inputs. if input.get_shape().ndims is not None: assert len(input.get_shape()) == 4 assert input.get_shape()[3] == in_channels, ( "Mismatched input depth %d and number of depthwise filters %d." % ( input.get_shape()[3].value, in_channels)) else: assert input.get_shape().ndims is not None, ( "Either tensor must provide static shape information.") assert input.get_shape().ndims == 4 in_channels = input.get_shape()[3] if in_channels == 1: return nn_ops.conv2d(input, filter, strides, padding, name=name) else: return nn_ops.depthwise_conv2d_native(input, filter, strides, padding, name=name) def separable_conv2d(input, depthwise_filter, pointwise_filter, strides, padding, name=None): """2-D convolution with separable filters. Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. Note that this is separability between dimensions `[1, 2]` and `3`, not spatial separability between dimensions `1` and `2`. 
In detail, output[b, i, j, k] = sum_{di, dj, q, r} input[b, strides[1] * i + di, strides[2] * j + dj, q] * depthwise_filter[di, dj, q, r] * pointwise_filter[0, 0, q * channel_multiplier + r, k] `strides` controls the strides for the depthwise convolution only, since the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`. depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. Contains `in_channels` convolutional filters of depth 1. pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise filter to mix channels after `depthwise_filter` has convolved spatially. strides: 1-D of size 4. The strides for the depthwise convolution for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution) name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`. Raises: ValueError: If channel_multiplier * in_channels > out_channels, which means that the separable convolution is overparameterized. """ with ops.op_scope([input, depthwise_filter, pointwise_filter], name, "separable_conv2d") as name: input = ops.convert_to_tensor(input, name="tensor_in") depthwise_filter = ops.convert_to_tensor(depthwise_filter, name="depthwise_filter") pointwise_filter = ops.convert_to_tensor(pointwise_filter, name="pointwise_filter") if pointwise_filter.get_shape().ndims is not None: assert len(pointwise_filter.get_shape()) == 4 assert pointwise_filter.get_shape()[0] == 1 assert pointwise_filter.get_shape()[1] == 1 if depthwise_filter.get_shape().ndims and input.get_shape().ndims: channel_multiplier = depthwise_filter.get_shape()[3] in_channels = input.get_shape()[3] out_channels = pointwise_filter.get_shape()[3] if channel_multiplier * in_channels > out_channels: raise ValueError( ("Refusing to perform an overparameterized separable " "convolution: channel_multiplier * in_channels = " "%d * %d = %d > %d = out_channels" % (channel_multiplier, in_channels, channel_multiplier * in_channels, out_channels))) # The layout of the ops in the graph is expected to be as follows: # depthwise_conv2d // Conv2D op corresponding to native depthwise conv. # separable_conv2d // Conv2D op corresponding to the pointwise conv. depthwise = nn_ops.depthwise_conv2d_native(input, depthwise_filter, strides, padding, name="depthwise") return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1], padding="VALID", name=name) def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None): """Calculate the sufficient statistics for the mean and variance of `x`. These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results.
keep_dims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. Returns: Four `Tensor` objects of the same type as `x`: * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if `shift` is None. """ with ops.op_scope([x, axes, shift], name, "sufficient_statistics"): x = ops.convert_to_tensor(x, name="x") x_shape = x.get_shape() if x_shape.is_fully_defined(): counts = 1 m_shape = [] for d in xrange(x_shape.ndims): dim = x_shape[d].value if d in set(axes): counts *= dim dim = 1 m_shape.append(dim) counts = constant_op.constant(counts, dtype=x.dtype) else: # shape needs to be inferred at runtime. x_shape = array_ops.shape(x) select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape), True, False) m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape), x_shape) counts = math_ops.cast( math_ops.reduce_prod(x_shape / m_shape), x.dtype, name="count") if shift is not None: shift = ops.convert_to_tensor(shift, name="shift") m_ss = math_ops.sub(x, shift) v_ss = math_ops.squared_difference(x, shift) else: # no shift. m_ss = x v_ss = math_ops.square(x) m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss") v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss") return counts, m_ss, v_ss, shift def normalize_moments(counts, mean_ss, variance_ss, shift, name=None): """Calculate the mean and variance based on the sufficient statistics. Args: counts: A `Tensor` containing the total count of the data (one value). mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) sum of squares of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. """ with ops.op_scope([counts, mean_ss, variance_ss, shift], name, "normalize"): divisor = math_ops.inv(counts, name="divisor") if shift is not None: shifted_mean = math_ops.mul(mean_ss, divisor, name="shifted_mean") mean = math_ops.add(shifted_mean, shift, name="mean") else: # no shift. shifted_mean = math_ops.mul(mean_ss, divisor, name="mean") mean = shifted_mean variance = math_ops.sub( math_ops.mul(variance_ss, divisor), math_ops.square(shifted_mean), name="variance") return (mean, variance) def moments(x, axes, shift=None, name=None, keep_dims=False): """Calculate the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called "global normalization", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: array of ints. Axes along which to compute mean and variance.
shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce moments with the same dimensionality as the input. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. """ with ops.op_scope([x, axes, shift], name, "moments"): # The dynamic range of fp16 is too limited to support the collection of # sufficient statistics. As a workaround we simply perform the operations # on 32-bit floats before converting the mean and variance back to fp16 y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x counts, m_ss, v_ss, shift = sufficient_statistics(y, axes, shift=shift, keep_dims=keep_dims, name=name) with ops.control_dependencies([counts, m_ss, v_ss]): mean, variance = normalize_moments(counts, m_ss, v_ss, shift, name=name) if x.dtype == dtypes.float16: return (math_ops.cast(mean, dtypes.float16), math_ops.cast( variance, dtypes.float16)) else: return (mean, variance) def batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name=None): """Batch normalization. As described in http://arxiv.org/abs/1502.03167. Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\\\(\gamma\\\\) to it, as well as an `offset` \\\\(\\beta\\\\): \\\\(\\frac{\gamma(x-\mu)}{\sigma}+\\beta\\\\) `mean`, `variance`, `offset` and `scale` are all expected to be of one of two shapes: * In all generality, they can have the same number of dimensions as the input `x`, with identical sizes as `x` for the dimensions that are not normalized over (the 'depth' dimension(s)), and dimension 1 for the others which are being normalized over. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=True)` during training, or running averages thereof during inference. * In the common case where the 'depth' dimension is the last dimension in the input tensor `x`, they may be one dimensional tensors of the same size as the 'depth' dimension. This is the case for example for the common `[batch, depth]` layout of fully-connected layers, and `[batch, height, width, depth]` for convolutions. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=False)` during training, or running averages thereof during inference. Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted \\\\(\\beta\\\\) in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted \\\\(\gamma\\\\) in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. name: A name for this operation (optional). Returns: the normalized, scaled, offset tensor. """ with ops.op_scope([x, mean, variance, scale, offset], name, "batchnorm"): inv = math_ops.rsqrt(variance + variance_epsilon) if scale is not None: inv *= scale return x * inv + ( offset - mean * inv if offset is not None else -mean * inv) def batch_norm_with_global_normalization(t, m, v, beta, gamma, variance_epsilon, scale_after_normalization, name=None): """Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. 
This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. name: A name for this operation (optional). Returns: A batch-normalized `t`. """ return batch_normalization(t, m, v, beta, gamma if scale_after_normalization else None, variance_epsilon, name) def _sum_rows(x): """Returns a vector summing up each row of the matrix x.""" # _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is # a matrix. The gradient of _sum_rows(x) is more efficient than # reduce_sum(x, 1)'s gradient in today's implementation. Therefore, # we use _sum_rows(x) in the nce_loss() computation since the loss # is mostly used for training. cols = array_ops.shape(x)[1] ones_shape = array_ops.pack([cols, 1]) ones = array_ops.ones(ones_shape, x.dtype) return array_ops.reshape(math_ops.matmul(x, ones), [-1]) def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, subtract_log_q=True, remove_accidental_hits=False, partition_strategy="mod", name=None): """Helper function for nce_loss and sampled_softmax_loss functions. Computes sampled output training logits and labels suitable for implementing e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see sampled_softmax_loss). Note: In the case where num_true > 1, we assign to each target class the target probability 1 / num_true so that the target probabilities sum to 1 per-example. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape `[num_classes, dim]`. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) subtract_log_q: A `bool`. whether to subtract the log expected count of the labels in the sample to get the logits of the true labels. Default is True. Turn off for Negative Sampling. remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. 
name: A name for the operation (optional). Returns: out_logits, out_labels: `Tensor` objects each with shape `[batch_size, num_true + num_sampled]`, for passing to either `nn.sigmoid_cross_entropy_with_logits` (NCE) or `nn.softmax_cross_entropy_with_logits` (sampled softmax). """ if not isinstance(weights, list): weights = [weights] with ops.op_scope( weights + [biases, inputs, labels], name, "compute_sampled_logits"): if labels.dtype != dtypes.int64: labels = math_ops.cast(labels, dtypes.int64) labels_flat = array_ops.reshape(labels, [-1]) # Sample the negative labels. # sampled shape: [num_sampled] tensor # true_expected_count shape = [batch_size, 1] tensor # sampled_expected_count shape = [num_sampled] tensor if sampled_values is None: sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler( true_classes=labels, num_true=num_true, num_sampled=num_sampled, unique=True, range_max=num_classes) # NOTE: pylint cannot tell that 'sampled_values' is a sequence # pylint: disable=unpacking-non-sequence sampled, true_expected_count, sampled_expected_count = sampled_values # pylint: enable=unpacking-non-sequence # labels_flat is a [batch_size * num_true] tensor # sampled is a [num_sampled] int tensor all_ids = array_ops.concat(0, [labels_flat, sampled]) # weights shape is [num_classes, dim] all_w = embedding_ops.embedding_lookup( weights, all_ids, partition_strategy=partition_strategy) all_b = embedding_ops.embedding_lookup(biases, all_ids) # true_w shape is [batch_size * num_true, dim] # true_b is a [batch_size * num_true] tensor true_w = array_ops.slice( all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], -1])) true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat)) # inputs shape is [batch_size, dim] # true_w shape is [batch_size * num_true, dim] # row_wise_dots is [batch_size, num_true, dim] dim = array_ops.shape(true_w)[1:2] new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim]) row_wise_dots = math_ops.mul( array_ops.expand_dims(inputs, 1), array_ops.reshape(true_w, new_true_w_shape)) # We want the row-wise dot plus biases which yields a # [batch_size, num_true] tensor of true_logits. dots_as_matrix = array_ops.reshape(row_wise_dots, array_ops.concat(0, [[-1], dim])) true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true]) true_b = array_ops.reshape(true_b, [-1, num_true]) true_logits += true_b # Lookup weights and biases for sampled labels. # sampled_w shape is [num_sampled, dim] # sampled_b is a [num_sampled] float tensor sampled_w = array_ops.slice( all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [-1, -1]) sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1]) # inputs has shape [batch_size, dim] # sampled_w has shape [num_sampled, dim] # sampled_b has shape [num_sampled] # Apply X*W'+B, which yields [batch_size, num_sampled] sampled_logits = math_ops.matmul(inputs, sampled_w, transpose_b=True) + sampled_b if remove_accidental_hits: acc_hits = candidate_sampling_ops.compute_accidental_hits( labels, sampled, num_true=num_true) acc_indices, acc_ids, acc_weights = acc_hits # This is how SparseToDense expects the indices. 
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1]) acc_ids_2d_int32 = array_ops.reshape(math_ops.cast( acc_ids, dtypes.int32), [-1, 1]) sparse_indices = array_ops.concat( 1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices") # Create sampled_logits_shape = [batch_size, num_sampled] sampled_logits_shape = array_ops.concat( 0, [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)]) if sampled_logits.dtype != acc_weights.dtype: acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype) sampled_logits += sparse_ops.sparse_to_dense( sparse_indices, sampled_logits_shape, acc_weights, default_value=0.0, validate_indices=False) if subtract_log_q: # Subtract log of Q(l), prior probability that l appears in sampled. true_logits -= math_ops.log(true_expected_count) sampled_logits -= math_ops.log(sampled_expected_count) # Construct output logits and labels. The true labels/logits start at col 0. out_logits = array_ops.concat(1, [true_logits, sampled_logits]) # true_logits is a float tensor, ones_like(true_logits) is a float tensor # of ones. We then divide by num_true to ensure the per-example labels sum # to 1.0, i.e. form a proper probability distribution. out_labels = array_ops.concat( 1, [array_ops.ones_like(true_logits) / num_true, array_ops.zeros_like(sampled_logits)]) return out_logits, out_labels def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=False, partition_strategy="mod", name="nce_loss"): """Computes and returns the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models] (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). Also see our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example. Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where a sampled class equals one of the target classes. If set to `True`, this is a "Sampled Logistic" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf). 
Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses. """ logits, labels = _compute_sampled_logits( weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = sigmoid_cross_entropy_with_logits(logits, labels, name="sampled_losses") # sampled_losses is batch_size x {true_loss, sampled_losses...} # We sum out true and sampled losses. return _sum_rows(sampled_losses) def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, partition_strategy="mod", name="sampled_softmax_loss"): """Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. At inference time, you can compute full softmax probabilities with the expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007) ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is True. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses. """ logits, labels = _compute_sampled_logits( weights, biases, inputs, labels, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, subtract_log_q=True, remove_accidental_hits=remove_accidental_hits, partition_strategy=partition_strategy, name=name) sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels) # sampled_losses is a [batch_size] tensor. return sampled_losses # TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn. 
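# Illustrative sketch only -- not part of this module. It mirrors, in NumPy,
# the shape bookkeeping described in `_compute_sampled_logits` for the simple
# case `num_true=1`, omitting the log-Q subtraction and accidental-hit
# removal; the helper name and the use of NumPy are assumptions made for
# exposition.
import numpy as np  # used only by the sketch below


def _sampled_logits_sketch(weights, biases, inputs, labels, sampled):
  """NumPy sketch: assemble [batch_size, 1 + num_sampled] logits and labels."""
  true_w = weights[labels]                                # [batch_size, dim]
  true_b = biases[labels]                                 # [batch_size]
  true_logits = np.sum(inputs * true_w, axis=1) + true_b  # [batch_size]
  sampled_w = weights[sampled]                            # [num_sampled, dim]
  sampled_b = biases[sampled]                             # [num_sampled]
  sampled_logits = inputs.dot(sampled_w.T) + sampled_b    # [batch_size, num_sampled]
  out_logits = np.concatenate([true_logits[:, None], sampled_logits], axis=1)
  out_labels = np.concatenate([np.ones((len(labels), 1)),
                               np.zeros_like(sampled_logits)], axis=1)
  return out_logits, out_labels


# Feeding these outputs to a sigmoid cross entropy yields the NCE loss, while
# feeding them to a softmax cross entropy yields the sampled softmax loss, as
# done by nce_loss and sampled_softmax_loss above.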
__all__ = make_all(__name__) __all__.append("zero_fraction") # documented in training.py # Modules whitelisted for reference through tf.nn. # TODO(cwhipkey): migrate callers to use the submodule directly. __all__.extend(["nn_ops", "rnn_cell", "seq2seq"]) # Symbols whitelisted for export without documentation. # TODO(cwhipkey): review these and move to contrib or expose through # documentation. __all__.extend([ "all_candidate_sampler", "batch_norm_with_global_normalization", "batch_normalization", "conv2d_backprop_filter", "conv2d_backprop_input", "depthwise_conv2d_native", "lrn", "relu_layer", "xw_plus_b", ])
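# Illustrative check only -- not part of this module. It verifies numerically
# (with NumPy, an assumption made for exposition) that the stable form used by
# sigmoid_cross_entropy_with_logits,
#     max(x, 0) - x * z + log(1 + exp(-abs(x))),
# agrees with the naive logistic loss for moderate logits.
import numpy as np  # used only by the check below

_x = np.array([-3.0, -0.5, 0.0, 2.0, 8.0])  # logits
_z = np.array([0.0, 1.0, 1.0, 0.0, 1.0])    # targets
_sig = 1.0 / (1.0 + np.exp(-_x))
_naive = _z * -np.log(_sig) + (1 - _z) * -np.log(1 - _sig)
_stable = np.maximum(_x, 0) - _x * _z + np.log1p(np.exp(-np.abs(_x)))
assert np.allclose(_naive, _stable)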
tensorflow/python/ops/nn.py
51,131
Helper function for nce_loss and sampled_softmax_loss functions. Computes sampled output training logits and labels suitable for implementing e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see sampled_softmax_loss). Note: In the case where num_true > 1, we assign to each target class the target probability 1 / num_true so that the target probabilities sum to 1 per-example. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape `[num_classes, dim]`. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) subtract_log_q: A `bool`. whether to subtract the log expected count of the labels in the sample to get the logits of the true labels. Default is True. Turn off for Negative Sampling. remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: out_logits, out_labels: `Tensor` objects each with shape `[batch_size, num_true + num_sampled]`, for passing to either `nn.sigmoid_cross_entropy_with_logits` (NCE) or `nn.softmax_cross_entropy_with_logits` (sampled softmax). Returns a vector summing up each row of the matrix x. Batch normalization. This op is deprecated. See `tf.nn.batch_normalization`. Args: t: A 4D input Tensor. m: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. v: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If "scale_after_normalization" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. name: A name for this operation (optional). Returns: A batch-normalized `t`. Batch normalization. As described in http://arxiv.org/abs/1502.03167. 
Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\): \\(\frac{\gamma(x-\mu)}{\sigma}+\beta\\) `mean`, `variance`, `offset` and `scale` are all expected to be of one of two shapes: * In all generality, they can have the same number of dimensions as the input `x`, with identical sizes as `x` for the dimensions that are not normalized over (the 'depth' dimension(s)), and dimension 1 for the others which are being normalized over. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=True)` during training, or running averages thereof during inference. * In the common case where the 'depth' dimension is the last dimension in the input tensor `x`, they may be one dimensional tensors of the same size as the 'depth' dimension. This is the case for example for the common `[batch, depth]` layout of fully-connected layers, and `[batch, height, width, depth]` for convolutions. `mean` and `variance` in this case would typically be the outputs of `tf.nn.moments(..., keep_dims=False)` during training, or running averages thereof during inference. Args: x: Input `Tensor` of arbitrary dimensionality. mean: A mean `Tensor`. variance: A variance `Tensor`. offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or None. If present, will be added to the normalized tensor. scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or `None`. If present, the scale is applied to the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. name: A name for this operation (optional). Returns: the normalized, scaled, offset tensor. Depthwise 2-D convolution. Given an input tensor of shape `[batch, in_height, in_width, in_channels]` and a filter tensor of shape `[filter_height, filter_width, in_channels, channel_multiplier]` containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies a different filter to each input channel (expanding from 1 channel to `channel_multiplier` channels for each), then concatenates the results together. The output has `in_channels * channel_multiplier` channels. In detail, output[b, i, j, k * channel_multiplier + q] = sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * filter[di, dj, k, q] Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: 4-D with shape `[batch, in_height, in_width, in_channels]`. filter: 4-D with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. strides: 1-D of size 4. The stride of the sliding window for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution) name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, in_channels * channel_multiplier].` Normalizes along dimension `dim` using an L2 norm. For a 1-D tensor with `dim = 0`, computes output = x / sqrt(max(sum(x**2), epsilon)) For `x` with more dimensions, independently normalizes each 1-D slice along dimension `dim`. Args: x: A `Tensor`. dim: Dimension along which to normalize. epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`. name: A name for this operation (optional). Returns: A `Tensor` with the same shape as `x`. 
Calculate the mean and variance of `x`. The mean and variance are calculated by aggregating the contents of `x` across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and variance of a vector. When using these moments for batch normalization (see `tf.nn.batch_normalization`): * for so-called "global normalization", used with convolutional filters with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`. * for simple batch normalization pass `axes=[0]` (batch only). Args: x: A `Tensor`. axes: array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce moments with the same dimensionality as the input. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. Computes and returns the noise-contrastive estimation training loss. See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models] (http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). Also see our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Note: In the case where `num_true` > 1, we assign to each target class the target probability 1 / `num_true` so that the target probabilities sum to 1 per-example. Note: It would be useful to allow a variable number of target classes per example. We hope to provide this functionality in a future release. For now, if you have a variable number of target classes, you can pad them out to a constant number by either repeating them or by padding with an otherwise unused class. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-partitioned) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. Whether to remove "accidental hits" where a sampled class equals one of the target classes. If set to `True`, this is a "Sampled Logistic" loss instead of NCE, and we are learning to generate log-odds instead of log probabilities. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf). Default is False. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example NCE losses. Calculate the mean and variance of based on the sufficient statistics. Args: counts: A `Tensor` containing a the total count of the data (one value). 
mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly shifted) sum of the elements to average over. variance_ss: A `Tensor` containing the variance sufficient statistics: the (possibly shifted) squared sum of the data to compute the variance over. shift: A `Tensor` containing the value by which the data is shifted for numerical stability, or `None` if no shift was performed. name: Name used to scope the operations that compute the moments. Returns: Two `Tensor` objects: `mean` and `variance`. Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "nn_relu_layer" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units. Computes and returns the sampled softmax training loss. This is a faster way to train a softmax classifier over a huge number of classes. This operation is for training only. It is generally an underestimate of the full softmax loss. At inference time, you can compute full softmax probabilities with the expression `tf.nn.softmax(tf.matmul(inputs, tf.transpose(weights)) + biases)`. See our [Candidate Sampling Algorithms Reference] (../../extras/candidate_sampling.pdf) Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007) ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math. Args: weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor` objects whose concatenation along dimension 0 has shape [num_classes, dim]. The (possibly-sharded) class embeddings. biases: A `Tensor` of shape `[num_classes]`. The class biases. inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network. labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes. Note that this format differs from the `labels` argument of `nn.softmax_cross_entropy_with_logits`. num_sampled: An `int`. The number of classes to randomly sample per batch. num_classes: An `int`. The number of possible classes. num_true: An `int`. The number of target classes per training example. sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`, `sampled_expected_count`) returned by a `*_candidate_sampler` function. (if None, we default to `log_uniform_candidate_sampler`) remove_accidental_hits: A `bool`. whether to remove "accidental hits" where a sampled class equals one of the target classes. Default is True. partition_strategy: A string specifying the partitioning strategy, relevant if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported. Default is `"mod"`. See `tf.nn.embedding_lookup` for more details. name: A name for the operation (optional). Returns: A `batch_size` 1-D tensor of per-example sampled softmax losses. 2-D convolution with separable filters. Performs a depthwise convolution that acts separately on channels followed by a pointwise convolution that mixes channels. Note that this is separability between dimensions `[1, 2]` and `3`, not spatial separability between dimensions `1` and `2`. 
In detail, output[b, i, j, k] = sum_{di, dj, q, r] input[b, strides[1] * i + di, strides[2] * j + dj, q] * depthwise_filter[di, dj, q, r] * pointwise_filter[0, 0, q * channel_multiplier + r, k] `strides` controls the strides for the depthwise convolution only, since the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have `strides[0] = strides[3] = 1`. For the most common case of the same horizontal and vertical strides, `strides = [1, stride, stride, 1]`. Args: input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`. depthwise_filter: 4-D `Tensor` with shape `[filter_height, filter_width, in_channels, channel_multiplier]`. Contains `in_channels` convolutional filters of depth 1. pointwise_filter: 4-D `Tensor` with shape `[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise filter to mix channels after `depthwise_filter` has convolved spatially. strides: 1-D of size 4. The strides for the depthwise convolution for each dimension of `input`. padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See the [comment here](https://www.tensorflow.org/api_docs/python/nn.html#convolution) name: A name for this operation (optional). Returns: A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`. Raises: ValueError: If channel_multiplier * in_channels > out_channels, which means that the separable convolution is overparameterized. Computes sigmoid cross entropy given `logits`. Measures the probability error in discrete classification tasks in which each class is independent and not mutually exclusive. For instance, one could perform multilabel classification where a picture can contain both an elephant and a dog at the same time. For brevity, let `x = logits`, `z = targets`. The logistic loss is z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + log(1 + exp(-x)) = x - x * z + log(1 + exp(-x)) For x < 0, to avoid overflow in exp(-x), we reformulate the above x - x * z + log(1 + exp(-x)) = log(exp(x)) - x * z + log(1 + exp(-x)) = - x * z + log(1 + exp(x)) Hence, to ensure stability and avoid overflow, the implementation uses this equivalent formulation max(x, 0) - x * z + log(1 + exp(-abs(x))) `logits` and `targets` must have the same type and shape. Args: logits: A `Tensor` of type `float32` or `float64`. targets: A `Tensor` of the same type and shape as `logits`. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise logistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. Calculate the sufficient statistics for the mean and variance of `x`. These sufficient statistics are computed using the one pass algorithm on an input that's optionally shifted. See: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data Args: x: A `Tensor`. axes: Array of ints. Axes along which to compute mean and variance. shift: A `Tensor` containing the value by which to shift the data for numerical stability, or `None` if no shift is to be performed. A shift close to the true mean provides the most numerically stable results. keep_dims: produce statistics with the same dimensionality as the input. name: Name used to scope the operations that compute the sufficient stats. 
Returns: Four `Tensor` objects of the same type as `x`: * the count (number of elements to average over). * the (possibly shifted) sum of the elements in the array. * the (possibly shifted) sum of squares of the elements in the array. * the shift by which the mean must be corrected or None if `shift` is None. Computes a weighted cross entropy. This is like `sigmoid_cross_entropy_with_logits()` except that `pos_weight`, allows one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. The usual cross-entropy cost is defined as: targets * -log(sigmoid(logits)) + (1 - targets) * -log(1 - sigmoid(logits)) The argument `pos_weight` is used as a multiplier for the positive targets: targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits)) For brevity, let `x = logits`, `z = targets`, `q = pos_weight`. The loss is: qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) = qz * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) = qz * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) = (1 - z) * x + (qz + 1 - z) * log(1 + exp(-x)) = (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x)) Setting `l = (1 + (q - 1) * z)`, to ensure stability and avoid overflow, the implementation uses (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0)) `logits` and `targets` must have the same type and shape. Args: logits: A `Tensor` of type `float32` or `float64`. targets: A `Tensor` of the same type and shape as `logits`. pos_weight: A coefficient to use on the positive examples. name: A name for the operation (optional). Returns: A `Tensor` of the same shape as `logits` with the componentwise weightedlogistic losses. Raises: ValueError: If `logits` and `targets` do not have the same shape. Returns the fraction of zeros in `value`. If `value` is empty, the result is `nan`. This is useful in summaries to measure and report sparsity. For example, z = tf.Relu(...) summ = tf.scalar_summary('sparsity', tf.nn.zero_fraction(z)) Args: value: A tensor of numeric type. name: A name for the operation (optional). Returns: The fraction of zeros in `value`, with type `float32`. ## Activation Functions The activation ops provide different types of nonlinearities for use in neural networks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`, `softplus`, and `softsign`), continuous but not everywhere differentiable functions (`relu`, `relu6`, and `relu_x`), and random regularization (`dropout`). All activation ops apply componentwise, and produce a tensor of the same shape as the input tensor. @@relu @@relu6 @@elu @@softplus @@softsign @@dropout @@bias_add @@sigmoid @@tanh ## Convolution The convolution ops sweep a 2-D filter over a batch of images, applying the filter to each window of each image of the appropriate size. The different ops trade off between generic vs. specific filters: * `conv2d`: Arbitrary filters that can mix channels together. * `depthwise_conv2d`: Filters that operate on each channel independently. * `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter. Note that although these ops are called "convolution", they are strictly speaking "cross-correlation" since the filter is combined with an input window without reversing the filter. For details, see [the properties of cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties). 
The filter is applied to image patches of the same size as the filter and strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the filter to every other image patch in each dimension, etc.

Ignoring channels for the moment, assume that the 4-D `input` has shape `[batch, in_height, in_width, ...]` and the 4-D `filter` has shape `[filter_height, filter_width, ...]`. The spatial semantics of the convolution ops are then as follows: first, according to the padding scheme chosen as `'SAME'` or `'VALID'`, the output size and the padding pixels are computed. For the `'SAME'` padding, the output height and width are computed as:

    out_height = ceil(float(in_height) / float(strides[1]))
    out_width  = ceil(float(in_width) / float(strides[2]))

and the padding on the top and left are computed as:

    pad_along_height = ((out_height - 1) * strides[1] + filter_height - in_height)
    pad_along_width  = ((out_width - 1) * strides[2] + filter_width - in_width)
    pad_top  = pad_along_height / 2
    pad_left = pad_along_width / 2

Note that the division by 2 means that there might be cases when the padding on both sides (top vs bottom, right vs left) is off by one. In this case, the bottom and right sides always get the one additional padded pixel. For example, when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the bottom. Note that this is different from existing libraries such as cuDNN and Caffe, which explicitly specify the number of padded pixels and always pad the same number of pixels on both sides.

For the `'VALID'` padding, the output height and width are computed as:

    out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
    out_width  = ceil(float(in_width - filter_width + 1) / float(strides[2]))

and the padding values are always zero. The output is then computed as

    output[b, i, j, :] =
        sum_{di, dj} input[b, strides[1] * i + di - pad_top,
                              strides[2] * j + dj - pad_left, ...] *
                     filter[di, dj, ...]

where any value outside the original input image region is considered zero (i.e. we pad zero values around the border of the image).

Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new vectors. For `depthwise_conv2d`, each scalar component `input[b, i, j, k]` is multiplied by a vector `filter[di, dj, k]`, and all the vectors are concatenated.

@@conv2d
@@depthwise_conv2d
@@separable_conv2d
@@atrous_conv2d
@@conv2d_transpose
@@conv3d

## Pooling

The pooling ops sweep a rectangular window over the input tensor, computing a reduction operation for each window (average, max, or max with argmax). Each pooling op uses rectangular windows of size `ksize` separated by offset `strides`. For example, if `strides` is all ones every window is used, if `strides` is all twos every other window is used in each dimension, etc.

In detail, the output is

    output[i] = reduce(value[strides * i:strides * i + ksize])

where the indices also take into consideration the padding values. Please refer to the `Convolution` section for details about the padding calculation.

@@avg_pool
@@max_pool
@@max_pool_with_argmax
@@avg_pool3d
@@max_pool3d

## Morphological filtering

Morphological operators are non-linear filters used in image processing.
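As a quick aside before the morphological ops, the `'SAME'`/`'VALID'` arithmetic above is easy to check in a few lines of Python. This is a self-contained sketch for a single spatial dimension, not TensorFlow code; the helper name is made up, and a `max(..., 0)` guard is added for the degenerate case where the computed padding would be negative.

```python
import math

def conv_output_and_padding(in_size, filter_size, stride, padding):
    """Return (out_size, pad_before, pad_after) for one spatial dimension."""
    if padding == 'SAME':
        out_size = math.ceil(in_size / stride)
        pad_along = max((out_size - 1) * stride + filter_size - in_size, 0)
        pad_before = pad_along // 2          # integer division: the extra pixel...
        pad_after = pad_along - pad_before   # ...goes to the bottom/right side
    elif padding == 'VALID':
        out_size = math.ceil((in_size - filter_size + 1) / stride)
        pad_before = pad_after = 0
    else:
        raise ValueError(padding)
    return out_size, pad_before, pad_after

# in_height=10, filter_height=6, stride=1, SAME: pad_along_height = 5 -> 2 top, 3 bottom
print(conv_output_and_padding(10, 6, 1, 'SAME'))   # (10, 2, 3)
print(conv_output_and_padding(10, 6, 1, 'VALID'))  # (5, 0, 0)
```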
[Greyscale morphological dilation](https://en.wikipedia.org/wiki/Dilation_(morphology)) is the max-sum counterpart of standard sum-product convolution:

    output[b, y, x, c] =
        max_{dy, dx} input[b, strides[1] * y + rates[1] * dy,
                              strides[2] * x + rates[2] * dx, c] +
                     filter[dy, dx, c]

The `filter` is usually called the structuring function. Max-pooling is a special case of greyscale morphological dilation when the filter assumes all-zero values (a.k.a. flat structuring function).

[Greyscale morphological erosion](https://en.wikipedia.org/wiki/Erosion_(morphology)) is the min-sum counterpart of standard sum-product convolution:

    output[b, y, x, c] =
        min_{dy, dx} input[b, strides[1] * y - rates[1] * dy,
                              strides[2] * x - rates[2] * dx, c] -
                     filter[dy, dx, c]

Dilation and erosion are dual to each other. The dilation of the input signal `f` by the structuring signal `g` is equal to the negation of the erosion of `-f` by the reflected `g`, and vice versa.

Striding and padding are carried out in exactly the same way as in standard convolution. Please refer to the `Convolution` section for details.

@@dilation2d
@@erosion2d

## Normalization

Normalization is useful to prevent neurons from saturating when inputs may have varying scale, and to aid generalization.

@@l2_normalize
@@local_response_normalization
@@sufficient_statistics
@@normalize_moments
@@moments

## Losses

The loss ops measure error between two tensors, or between a tensor and zero. These can be used for measuring accuracy of a network in a regression task or for regularization purposes (weight decay).

@@l2_loss

## Classification

TensorFlow provides several operations that help you perform classification.

@@sigmoid_cross_entropy_with_logits
@@softmax
@@log_softmax
@@softmax_cross_entropy_with_logits
@@sparse_softmax_cross_entropy_with_logits
@@weighted_cross_entropy_with_logits

## Embeddings

TensorFlow provides library support for looking up values in embedding tensors.

@@embedding_lookup
@@embedding_lookup_sparse

## Recurrent Neural Networks

TensorFlow provides a number of methods for constructing Recurrent Neural Networks. Most accept an `RNNCell`-subclassed object (see the documentation for `tf.nn.rnn_cell`).

@@dynamic_rnn
@@rnn
@@state_saving_rnn
@@bidirectional_rnn

## Connectionist Temporal Classification (CTC)

@@ctc_loss
@@ctc_greedy_decoder
@@ctc_beam_search_decoder

## Evaluation

The evaluation ops are useful for measuring the performance of a network. Since they are nondifferentiable, they are typically used at evaluation time.

@@top_k
@@in_top_k

## Candidate Sampling

Do you want to train a multiclass or multilabel model with thousands or millions of output classes (for example, a language model with a large vocabulary)? Training with a full Softmax is slow in this case, since all of the classes are evaluated for every training example. Candidate Sampling training algorithms can speed up your step times by only considering a small randomly-chosen subset of contrastive classes (called candidates) for each batch of training examples.

See our [Candidate Sampling Algorithms Reference](../../extras/candidate_sampling.pdf)

### Sampled Loss Functions

TensorFlow provides the following sampled loss functions for faster training.

@@nce_loss
@@sampled_softmax_loss

### Candidate Samplers

TensorFlow provides the following samplers for randomly sampling candidate classes when using one of the sampled loss functions above.
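As a rough illustration of what one of these samplers does, the sketch below draws candidate classes from the skewed, approximately Zipfian distribution that a log-uniform sampler targets, `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`. This is plain NumPy, not the TensorFlow op, and `range_max`/`num_sampled` are arbitrary values chosen for the example.

```python
import numpy as np

range_max, num_sampled = 1000, 8
classes = np.arange(range_max)

# P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
p = (np.log(classes + 2) - np.log(classes + 1)) / np.log(range_max + 1)

rng = np.random.default_rng(0)
sampled = rng.choice(classes, size=num_sampled, replace=False, p=p)
print(sampled)   # skewed toward small (frequent) class ids
print(p.sum())   # 1.0 up to floating point
```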
@@uniform_candidate_sampler
@@log_uniform_candidate_sampler
@@learned_unigram_candidate_sampler
@@fixed_unigram_candidate_sampler

### Miscellaneous candidate sampling utilities

@@compute_accidental_hits

Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
=============================================================================

pylint: disable=unused-import,g-bad-import-order
pylint: disable=redefined-builtin
Bring more nn-associated functionality into this package.
go/tf-wildcard-import
pylint: disable=wildcard-import
pylint: enable=wildcard-import

The logistic loss formula from above is
    x - x * z + log(1 + exp(-x))
For x < 0, a more numerically stable formula is
    -x * z + log(1 + exp(x))
Note that these two expressions can be combined into the following:
    max(x, 0) - x * z + log(1 + exp(-abs(x)))
To allow computing gradients at zero, we define custom versions of the max and abs functions.

The logistic loss formula from above is
    (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(-x))
For x < 0, a more numerically stable formula is
    (1 - z) * x + (1 + (q - 1) * z) * log(1 + exp(x)) - l * x
To avoid branching, we use the combined version
    (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))

A shape is required to statically compute the number of separable filters. Sanity checks, if shape information is available for the inputs. The layout of the ops in the graph is expected to be as follows:
    depthwise_conv2d  // Conv2D op corresponding to the native depthwise conv.
    separable_conv2d  // Conv2D op corresponding to the pointwise conv.
shape needs to be inferred at runtime. no shift. no shift.

The dynamic range of fp16 is too limited to support the collection of sufficient statistics. As a workaround we simply perform the operations on 32-bit floats before converting the mean and variance back to fp16.

_sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is a matrix. The gradient of _sum_rows(x) is more efficient than reduce_sum(x, 1)'s gradient in today's implementation. Therefore, we use _sum_rows(x) in the nce_loss() computation since the loss is mostly used for training.

Sample the negative labels.
    sampled shape: [num_sampled] tensor
    true_expected_count shape = [batch_size, 1] tensor
    sampled_expected_count shape = [num_sampled] tensor
NOTE: pylint cannot tell that 'sampled_values' is a sequence
pylint: disable=unpacking-non-sequence
pylint: enable=unpacking-non-sequence
labels_flat is a [batch_size * num_true] tensor
sampled is a [num_sampled] int tensor
weights shape is [num_classes, dim]
true_w shape is [batch_size * num_true, dim]
true_b is a [batch_size * num_true] tensor
inputs shape is [batch_size, dim]
true_w shape is [batch_size * num_true, dim]
row_wise_dots is [batch_size, num_true, dim]
We want the row-wise dot plus biases which yields a [batch_size, num_true] tensor of true_logits.
Lookup weights and biases for sampled labels.
sampled_w shape is [num_sampled, dim] sampled_b is a [num_sampled] float tensor inputs has shape [batch_size, dim] sampled_w has shape [num_sampled, dim] sampled_b has shape [num_sampled] Apply X*W'+B, which yields [batch_size, num_sampled] This is how SparseToDense expects the indices. Create sampled_logits_shape = [batch_size, num_sampled] Subtract log of Q(l), prior probability that l appears in sampled. Construct output logits and labels. The true labels/logits start at col 0. true_logits is a float tensor, ones_like(true_logits) is a float tensor of ones. We then divide by num_true to ensure the per-example labels sum to 1.0, i.e. form a proper probability distribution. sampled_losses is batch_size x {true_loss, sampled_losses...} We sum out true and sampled losses. sampled_losses is a [batch_size] tensor. TODO(cwhipkey): sigmoid and tanh should not be exposed from tf.nn. documented in training.py Modules whitelisted for reference through tf.nn. TODO(cwhipkey): migrate callers to use the submodule directly. Symbols whitelisted for export without documentation. TODO(cwhipkey): review these and move to contrib or expose through documentation.
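The shape bookkeeping sketched in the comments above (true vs. sampled logits, subtracting log Q(l), and building per-example labels that sum to one) can be mimicked in a few lines of NumPy. This is an illustrative sketch with fabricated placeholder data and expected counts, not the TensorFlow implementation.

```python
import numpy as np

rng = np.random.default_rng(0)
batch_size, dim, num_true, num_sampled, num_classes = 4, 8, 1, 5, 100

inputs = rng.normal(size=(batch_size, dim))                  # [batch_size, dim]
weights = rng.normal(size=(num_classes, dim))                # [num_classes, dim]
biases = rng.normal(size=(num_classes,))                     # [num_classes]

labels = rng.integers(0, num_classes, size=(batch_size, num_true))
sampled = rng.integers(0, num_classes, size=(num_sampled,))  # [num_sampled]

# Placeholder expected counts Q(l) that a real candidate sampler would return.
true_expected_count = np.full((batch_size, num_true), 0.01)
sampled_expected_count = np.full((num_sampled,), 0.01)

# true_w: [batch_size * num_true, dim] -> row-wise dot plus bias -> [batch_size, num_true]
true_w = weights[labels.reshape(-1)]
true_b = biases[labels.reshape(-1)]
row_wise_dots = inputs[:, None, :] * true_w.reshape(batch_size, num_true, dim)
true_logits = row_wise_dots.sum(axis=-1) + true_b.reshape(batch_size, num_true)

# sampled_w: [num_sampled, dim]; X @ W' + B -> [batch_size, num_sampled]
sampled_w = weights[sampled]
sampled_b = biases[sampled]
sampled_logits = inputs @ sampled_w.T + sampled_b

# Subtract log Q(l), the prior probability that l appears in the sample.
true_logits -= np.log(true_expected_count)
sampled_logits -= np.log(sampled_expected_count)

# Output logits/labels: true columns first; labels sum to 1.0 per example.
out_logits = np.concatenate([true_logits, sampled_logits], axis=1)
out_labels = np.concatenate([np.ones_like(true_logits) / num_true,
                             np.zeros_like(sampled_logits)], axis=1)
print(out_logits.shape, out_labels.shape)  # (4, 6) (4, 6)
```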
33,307
en
0.770274
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path, lars_path_gram from sklearn.linear_model import lasso_path from sklearn.datasets import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features // 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 500, 3).astype(int) features_range = np.linspace(10, 1400 , 3).astype(int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) # ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) # ax.legend() i += 1 #plt.show()
benchmarks/lasso_replicas/bench_plot_lasso_path_83.py
3,979
Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. 'effective_rank': None, precomputed Gram matrix register the 3d projection plot the actual surface dummy point plot to stick the legend to since surface plot do not support legends (yet?) ax.plot([1], [1], [1], color=c, label=label) ax.legend()plt.show()
388
en
0.704973
# -*- encoding: utf-8 -*-
import re

from oops.utils import sudo_support

# The character typed by Alt+Space is a non-breaking space (U+00A0); it is
# written explicitly here so the intent survives copy/paste and encoding
# round trips.
NBSP = u'\u00a0'


@sudo_support
def match(command, settings):
    return ('command not found' in command.stderr.lower()
            and NBSP in command.script)


@sudo_support
def get_new_command(command, settings):
    return re.sub(NBSP, ' ', command.script)
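A quick illustration of what the rule does, using the same substitution on a made-up command string:

```python
import re

NBSP = u'\u00a0'
script = u'ls\u00a0-la'             # "ls -la" typed with Alt+Space
print(NBSP in script)               # True  -> the rule above would match
print(re.sub(NBSP, ' ', script))    # ls -la   (with a plain space)
```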
oops/rules/fix_alt_space.py
320
-*- encoding: utf-8 -*-
23
en
0.76908
from mmcv.cnn import ConvModule from torch import nn from torch.utils import checkpoint as cp from .se_layer import SELayer class InvertedResidual(nn.Module): """InvertedResidual block for MobileNetV2. Args: in_channels (int): The input channels of the InvertedResidual block. out_channels (int): The output channels of the InvertedResidual block. stride (int): Stride of the middle (first) 3x3 convolution. expand_ratio (int): Adjusts number of channels of the hidden layer in InvertedResidual by this amount. dilation (int): Dilation rate of depthwise conv. Default: 1 conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor. """ def __init__(self, in_channels, out_channels, stride, expand_ratio, dilation=1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), with_cp=False): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2], f'stride must in [1, 2]. ' \ f'But received {stride}.' self.with_cp = with_cp self.use_res_connect = self.stride == 1 and in_channels == out_channels hidden_dim = int(round(in_channels * expand_ratio)) layers = [] if expand_ratio != 1: layers.append( ConvModule( in_channels=in_channels, out_channels=hidden_dim, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) layers.extend([ ConvModule( in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=hidden_dim, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), ConvModule( in_channels=hidden_dim, out_channels=out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) ]) self.conv = nn.Sequential(*layers) def forward(self, x): def _inner_forward(x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out class InvertedResidualV3(nn.Module): """Inverted Residual Block for MobileNetV3. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. mid_channels (int): The input channels of the depthwise convolution. kernel_size (int): The kernel size of the depthwise convolution. Default: 3. stride (int): The stride of the depthwise convolution. Default: 1. se_cfg (dict): Config dict for se layer. Default: None, which means no se layer. with_expand_conv (bool): Use expand conv or not. If set False, mid_channels must be the same with in_channels. Default: True. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor. 
""" def __init__(self, in_channels, out_channels, mid_channels, kernel_size=3, stride=1, se_cfg=None, with_expand_conv=True, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), with_cp=False): super(InvertedResidualV3, self).__init__() self.with_res_shortcut = (stride == 1 and in_channels == out_channels) assert stride in [1, 2] self.with_cp = with_cp self.with_se = se_cfg is not None self.with_expand_conv = with_expand_conv if self.with_se: assert isinstance(se_cfg, dict) if not self.with_expand_conv: assert mid_channels == in_channels if self.with_expand_conv: self.expand_conv = ConvModule( in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) self.depthwise_conv = ConvModule( in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=mid_channels, conv_cfg=dict( type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) if self.with_se: self.se = SELayer(**se_cfg) self.linear_conv = ConvModule( in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) def forward(self, x): def _inner_forward(x): out = x if self.with_expand_conv: out = self.expand_conv(out) out = self.depthwise_conv(out) if self.with_se: out = self.se(out) out = self.linear_conv(out) if self.with_res_shortcut: return x + out else: return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out
mmseg/models/utils/inverted_residual.py
7,213
InvertedResidual block for MobileNetV2. Args: in_channels (int): The input channels of the InvertedResidual block. out_channels (int): The output channels of the InvertedResidual block. stride (int): Stride of the middle (first) 3x3 convolution. expand_ratio (int): Adjusts number of channels of the hidden layer in InvertedResidual by this amount. dilation (int): Dilation rate of depthwise conv. Default: 1 conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor. Inverted Residual Block for MobileNetV3. Args: in_channels (int): The input channels of this Module. out_channels (int): The output channels of this Module. mid_channels (int): The input channels of the depthwise convolution. kernel_size (int): The kernel size of the depthwise convolution. Default: 3. stride (int): The stride of the depthwise convolution. Default: 1. se_cfg (dict): Config dict for se layer. Default: None, which means no se layer. with_expand_conv (bool): Use expand conv or not. If set False, mid_channels must be the same with in_channels. Default: True. conv_cfg (dict): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor.
2,014
en
0.606077
import matplotlib.pyplot as plt import numpy as np import seaborn as sns import pandas as pd import json import glob import os import argparse from typing import Tuple, Union, List from collections import Counter from tqdm import tqdm from multiprocessing import Pool pd.options.mode.chained_assignment = None # default='warn' # ==================================================================== def get_data(img_pth: Union[str, os.PathLike]) -> dict: """Get a single data from the given file.json path""" with open(img_pth, 'r') as f: data = json.load(f) return data def get_original_df( path: Union[str, os.PathLike], filename: str, processes_per_cpu: int = 2) -> Tuple[pd.DataFrame, bool]: """Get a DataFrame from all the can_bus*.json files in the dataset""" save_path = os.path.join(os.getcwd(), 'data_analysis', filename) if os.path.isfile(save_path): print('.npy file exists, loading it...') data = list(np.load(save_path, allow_pickle=True)) else: # Construct the dataset print('.npy file not found, constructing it...') all_data_paths = sorted(glob.glob(os.path.join(path, '**/can_bus*.json'), recursive=True)) with Pool(os.cpu_count() * processes_per_cpu) as p: data = list(tqdm(p.imap(get_data, all_data_paths), total=len(all_data_paths))) np.save(save_path, data) # Create dataframe with the data df = pd.DataFrame(data) print(df.describe()) return df, False # ==================================================================== def get_augmented_df(preloads_name: str) -> Tuple[pd.DataFrame, bool]: """Use the preloads file to load the data; will be augmented, as that's what we did""" assert preloads_name.endswith('.npy') data = np.load(os.path.join(os.getcwd(), '_preloads', preloads_name), allow_pickle=True)[1] df = pd.DataFrame(data) print(df.describe()) return df, True # ==================================================================== def violin_plot(df: pd.DataFrame, save_name: str, augmented: bool) -> None: """Save violin plot for the interesting parameters using df""" directions_dict = {'No Action': 2.0, 'Turn Left': 3.0, 'Turn Right': 4.0, 'Continue Straight': 5.0} # Auxiliary function for setting the quartile lines def set_lines(ax): for l in ax.lines: l.set_linestyle('--') l.set_linewidth(0.6) l.set_color('white') l.set_alpha(0.7) for l in ax.lines[1::3]: l.set_linestyle('-') l.set_linewidth(1.3) l.set_color('black') l.set_alpha(0.8) for key in directions_dict: # Get respective subset of the dataframe data = df[df['directions'] == directions_dict[key]] fig = plt.figure(figsize=(8, 6)) gs = fig.add_gridspec(1, 4) fig.add_subplot(gs[0, 0]) ax = sns.violinplot(y='steer', data=data, color='r', inner='quartile') set_lines(ax) fig.add_subplot(gs[0, 1]) ax = sns.violinplot(y='throttle', data=data, color='g', inner='quartile') set_lines(ax) fig.add_subplot(gs[0, 2]) ax = sns.violinplot(y='brake', data=data, color='b', inner='quartile') set_lines(ax) fig.add_subplot(gs[0, 3]) ax = sns.violinplot(y='speed', data=data, color='m', inner='quartile') set_lines(ax) # When using tight layout, we need the title to be spaced accordingly fig.tight_layout() fig.subplots_adjust(top=0.88) stitle = f'Direction: {key} - $N={len(data)}$ - ${100 * len(data)/len(df):6.3f}$% of total' stitle = f'{stitle} - Augmented' if augmented else stitle fig.suptitle(stitle, fontsize=16) fname = f'{save_name}-{key.replace(" ", "")}' fname = f'{fname}-aug' if augmented else fname fig_name = os.path.join(os.getcwd(), 'data_analysis', save_name, 'violin_plots', f'{fname}.png') os.makedirs(os.path.join(os.getcwd(), 
'data_analysis', save_name, 'violin_plots'), exist_ok=True) plt.savefig(fig_name) plt.close() # ==================================================================== def plot_clients(path: Union[str, os.PathLike], df: pd.DataFrame, augmented: bool, speed_factor: float) -> None: """Plot the steer, throttle, brake, and speed of a client during its data collection""" # Some sanity check if path.endswith(os.sep): path = path[:-1] # Get dataset name and make the necessary directories dataset_name = os.path.basename(path) s_path = os.path.join(os.getcwd(), 'data_analysis', dataset_name, 'clients') os.makedirs(s_path, exist_ok=True) # Get the number of clients/cars that collected the data clients = glob.glob(os.path.join(path, '**/*')) clients = [cl for cl in clients if os.path.isdir(cl)] # Remove path of metadata.json num_clients = len(clients) # Total number of frames and for a single client num_frames = len(df) num_frames_per_client = num_frames // num_clients # Aux function def get_change_locs(df: pd.DataFrame, cli: int) -> Tuple[List[int], List[float]]: """Get the index and directions from the df of the actions taken by the client""" df['directions_str'] = df['directions'].astype(str) # In order to compare, turn directions into a string # Shift directions column by 1 (filling the top with the head), and compare to the original df['change'] = df['directions_str'].shift(1, fill_value=df['directions_str'].head(1)) != df['directions_str'] # Get the rows where there's a change index_change = list(df.loc[df['change'] == True].index.values) # Add the first frame index_change = [(cli - 1) * len(df)] + index_change # For these indexes, get the value of the direction dirs = list(df['directions'][index_change].values) # Add the last frame index_change = index_change + [cli * len(df) - 1] return index_change, dirs # Dictionaries containing the name and color for plotting the direction given to the car my_labels = {2.0: 'No Action', 3.0: 'Turn Left', 4.0: 'Turn Right', 5.0: 'Continue Straight'} colors = {2.0: 'gold', 3.0: 'gray', 4.0: 'cyan', 5.0: 'magenta'} # Initialize the total counts per action total_action_counts = Counter({2.0: 0, 3.0: 0, 4.0: 0, 5.0: 0}) max_speed_clients = {} idx_change_clients = {} dirs_clients = {} # Make a plot for each client for client in tqdm(range(1, num_clients + 1), total=num_clients, unit='clients'): if augmented: # Dataframe will have augmented data, which uses center, left, right, center, ... 
data df_client = df[(client - 1) * num_frames_per_client: client * num_frames_per_client: 3] else: df_client = df[(client - 1) * num_frames_per_client: client * num_frames_per_client] # Augmented data will have been normalized already df_client['speed'] = df_client['speed'].div(speed_factor) # normalize to range [0, 1] # The actual max speed (see if it differs from collected data) actual_max_speed = df_client['speed'].max() max_speed_clients[client] = actual_max_speed # Build the plot fig, ax = plt.subplots(figsize=(48, 16)) fig.tight_layout(rect=[0, 0.03, 1, 0.95]) df_client.plot(y=['steer', 'throttle', 'brake', 'speed'], ax=ax) # Set the area colors for when an direction is taken idx_change, dirs = get_change_locs(df_client, client) for idx, dir in enumerate(dirs): ax.axvspan(idx_change[idx], idx_change[idx + 1], facecolor=colors[dir], alpha=0.5, label=my_labels[dir]) # Save these index and directions for each client idx_change_clients[f'client_{client:02d}'] = [int(idx) for idx in idx_change] dirs_clients[f'client_{client:02d}'] = [float(d) for d in dirs] # Count the directions taken by the client dirs_count = Counter(dirs) # Add this to the total for the whole dataset total_action_counts += dirs_count # Add the counts to the title total_actions = '' for key in my_labels: total_actions += f' - {my_labels[key]}: {dirs_count[key]}' # Set title and x and y axes labels suptitle = f'Client {client} - Actual max speed: {actual_max_speed:.4f}' suptitle = f'{suptitle} - Augmented' if augmented else suptitle suptitle = f'{suptitle}{total_actions}' plt.suptitle(suptitle, fontsize=30) plt.xlabel('Frame idx', fontsize=22) plt.ylabel('Normed value', fontsize=22) plt.xticks(list(range((client - 1) * num_frames_per_client, client * num_frames_per_client + 1, len(df_client) // 20))) # ticks in 5% increments # Fix the legend / remove duplicated areas and labels hand, labl = ax.get_legend_handles_labels() handout = [] lablout = [] for h, l in zip(hand, labl): if l not in lablout: lablout.append(l) handout.append(h) ax.legend(handout, lablout, fontsize='x-large') sname = os.path.join(s_path, f'{dataset_name}_Client{client:02d}') sname = f'{sname}-aug' if augmented else sname plt.savefig(f'{sname}.png', dpi=300) plt.close() # Add summary and save it as a JSON file actions_summary = { 'avg_no_action': total_action_counts[2.0] / num_clients, 'avg_turn_left': total_action_counts[3.0] / num_clients, 'avg_turn_right': total_action_counts[4.0] / num_clients, 'avg_continue_straight': total_action_counts[5.0] / num_clients } summary = { 'num_clients': num_clients, 'num_frames_per_client': num_frames_per_client, 'hours_per_client': num_frames_per_client / (20 * 60 * 60), 'total_action_counts': total_action_counts, 'actions_summary': actions_summary, 'max_speed_clients': max_speed_clients, 'idx_change_clients': idx_change_clients, 'dirs_clients': dirs_clients } with open(os.path.join(s_path, f'{dataset_name}-summary.json'), 'w') as f: json.dump(summary, f, indent=4) # ==================================================================== def main(): parser = argparse.ArgumentParser() parser.add_argument('--path', type=str, help='Path to the head of the dataset', required=True) parser.add_argument('--filename', type=str, help='Name of file to save', default=None) parser.add_argument('--preloads-name', type=str, help='Name of preload file', default=None) parser.add_argument('--processes-per-cpu', '-proc', type=int, help='Processes per cpu (default: %(default)s)', default=2) parser.add_argument('--speed-factor', '-sf', 
type=float, help='Speed factor to normalize data (default: %(default)s)', default=14.0) parser.add_argument('--plot-clients', action='store_true', help='Add flag to plot the actions and speed of a client') args = parser.parse_args() # Create dir if it doesn't exist if not os.path.exists(os.path.join(os.getcwd(), 'data_analysis')): os.mkdir(os.path.join(os.getcwd(), 'data_analysis')) print('Getting the dataframe...') if args.preloads_name is not None: # Preloaded data is augmented df, augmented = get_augmented_df(preloads_name=args.preloads_name) save_name = os.path.basename(args.preloads_name).split('.')[0] else: assert args.filename is not None assert args.filename.endswith('.npy') df, augmented = get_original_df(args.path, args.filename, args.processes_per_cpu) save_name = os.path.basename(args.filename).split('.')[0] # Create and save the violin plots print('Plotting data...') violin_plot(df, save_name, augmented) if args.plot_clients: print(f'Plotting actions taken by all clients in {args.path}...') plot_clients(path=args.path, df=df, augmented=augmented, speed_factor=args.speed_factor) print('Done!') # ==================================================================== if __name__ == '__main__': main() # ====================================================================
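The shift-and-compare trick used in `get_change_locs` is easiest to see on a toy frame. This is a standalone pandas sketch of the same idea, not a call into the script itself.

```python
import pandas as pd

df = pd.DataFrame({'directions': [2.0, 2.0, 3.0, 3.0, 3.0, 2.0, 5.0]})

s = df['directions'].astype(str)
change = s.shift(1, fill_value=s.iloc[0]) != s   # True where the command changes
index_change = [0] + list(df[change].index)      # prepend the first frame
dirs = list(df['directions'][index_change])

print(index_change)  # [0, 2, 5, 6]
print(dirs)          # [2.0, 3.0, 2.0, 5.0]
```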
analyze_dataset.py
12,428
Use the preloads file to load the data; will be augmented, as that's what we did Get the index and directions from the df of the actions taken by the client Get a single data from the given file.json path Get a DataFrame from all the can_bus*.json files in the dataset Plot the steer, throttle, brake, and speed of a client during its data collection Save violin plot for the interesting parameters using df default='warn' ==================================================================== Construct the dataset Create dataframe with the data ==================================================================== ==================================================================== Auxiliary function for setting the quartile lines Get respective subset of the dataframe When using tight layout, we need the title to be spaced accordingly ==================================================================== Some sanity check Get dataset name and make the necessary directories Get the number of clients/cars that collected the data Remove path of metadata.json Total number of frames and for a single client Aux function In order to compare, turn directions into a string Shift directions column by 1 (filling the top with the head), and compare to the original Get the rows where there's a change Add the first frame For these indexes, get the value of the direction Add the last frame Dictionaries containing the name and color for plotting the direction given to the car Initialize the total counts per action Make a plot for each client Dataframe will have augmented data, which uses center, left, right, center, ... data Augmented data will have been normalized already normalize to range [0, 1] The actual max speed (see if it differs from collected data) Build the plot Set the area colors for when an direction is taken Save these index and directions for each client Count the directions taken by the client Add this to the total for the whole dataset Add the counts to the title Set title and x and y axes labels ticks in 5% increments Fix the legend / remove duplicated areas and labels Add summary and save it as a JSON file ==================================================================== Create dir if it doesn't exist Preloaded data is augmented Create and save the violin plots ==================================================================== ====================================================================
2,439
en
0.770533
#! /usr/bin/env python
# coding=utf-8
import ply.lex as lex

# LEX for parsing Python

# Tokens
tokens = ('VARIABLE', 'NUMBER', 'IF', 'ELIF', 'ELSE', 'WHILE', 'FOR', 'PRINT',
          'INC', 'LEN', 'GDIV', 'BREAK', 'LET')

literals = ['=', '+', '-', '*', '(', ')', '{', '}', '<', '>', ';', ',', '[', ']']


# Definition of tokens. Function rules are tried in the order they are defined,
# so the keyword rules must come before the catch-all VARIABLE rule.
def t_NUMBER(t):
    r'[0-9]+'
    return t

def t_PRINT(t):
    r'print'
    return t

def t_IF(t):
    r'if'
    return t

def t_WHILE(t):
    r'while'
    return t

def t_FOR(t):
    r'for'
    return t

def t_LEN(t):
    r'len'
    return t

def t_INC(t):
    r'\+\+'
    return t

def t_GDIV(t):
    r'//'
    return t

def t_BREAK(t):
    r'break'
    return t

def t_LET(t):
    r'<='
    return t

def t_ELIF(t):
    r'elif'
    return t

def t_ELSE(t):
    r'else'
    return t

def t_VARIABLE(t):
    r'[a-zA-Z_]+'
    return t

# Ignored characters
t_ignore = " \t"

def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)

lex.lex()
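A quick tokenisation check for the lexer above, assuming the file is importable as `py_lex` (per the path below); the input string is made up.

```python
import ply.lex as lex

import py_lex  # defines tokens, literals and the t_* rules above

lexer = lex.lex(module=py_lex)   # build a lexer from the rules in py_lex
lexer.input('for i = 0 ; i <= len ( xs ) { print ( i ) ; i ++ }')

while True:
    tok = lexer.token()
    if not tok:
        break
    print(tok.type, tok.value)   # e.g. FOR for, VARIABLE i, = =, NUMBER 0, ...
```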
py_lex.py
1,037
break elif else for // if \+\+ len <= [0-9]+ print [a-zA-Z_]+ while ! /usr/bin/env pythoncoding=utf-8 LEX for parsing Python TokensDefine of tokens Ignored
156
en
0.41487
#from collections import Counter import requests from bs4 import BeautifulSoup from tabulate import tabulate import backoff import json @backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=60) def get_url(url):#, headers): return requests.get(url) #, headers=headers) URL = 'http://www.cancerimagingarchive.net/collections/' page = get_url(URL) soup = BeautifulSoup(page.content, "html.parser") table = soup.find(id="tablepress-9") #print(table.prettify()) rows = table.find_all("tr") analysis_details = [] with open("output/image_analyses_details.json") as analysis_details_file: analysis_details = json.load(analysis_details_file) print("analysis details:") print(analysis_details) table = [] header = "Collection,DOI,CancerType,Location,Species,Subjects,ImageTypes,SupportingData,Access,Status,Updated".split(",") for row in rows: trow = {} cols = row.find_all("td") for cid, col in enumerate(cols): if cid == 0: trow[header[0]] = col.find("a").text trow[header[1]] = col.find("a")["href"] if not trow[header[1]].startswith("http"): trow[header[1]] = "http:"+col.find("a")["href"] else: trow[header[cid+1]] = col.text if len(trow): table = table + [trow] if trow["SupportingData"].find("Image Analyses")>=0: if trow["Collection"] not in [ i["Collection"] for i in analysis_details]: analysis_details.append({"Collection": trow["Collection"], "DOI":trow["DOI"], "Format":"", "CollectionType": "original", "DICOMstatus": "", "DICOMtarget": "", "Comment": ""}) print(len(rows)) with open("output/collections.json", "w") as f: f.write(json.dumps(table, indent=2)) with open("output/image_analyses_details.json", "w") as f: f.write(json.dumps(analysis_details, indent=2))
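The row-parsing loop above can be exercised without hitting the TCIA site by feeding BeautifulSoup a tiny inline table; the HTML snippet and the shortened header list here are fabricated for illustration.

```python
from bs4 import BeautifulSoup

html = """
<table id="tablepress-9">
  <tr><td><a href="//doi.org/10.7937/example">Demo-Collection</a></td>
      <td>Lung</td><td>Human</td></tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")
header = ["Collection", "DOI", "Location", "Species"]

for row in soup.find(id="tablepress-9").find_all("tr"):
    trow = {}
    for cid, col in enumerate(row.find_all("td")):
        if cid == 0:
            trow[header[0]] = col.find("a").text
            trow[header[1]] = col.find("a")["href"]
            if not trow[header[1]].startswith("http"):
                trow[header[1]] = "http:" + trow[header[1]]
        else:
            trow[header[cid + 1]] = col.text
    print(trow)
# {'Collection': 'Demo-Collection', 'DOI': 'http://doi.org/10.7937/example',
#  'Location': 'Lung', 'Species': 'Human'}
```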
scrapers/get_collections.py
1,844
from collections import Counter, headers):, headers=headers)print(table.prettify())
83
en
0.512766
"""This script runs code quality checks on given Python files. Note: This script assumes you use Poetry as your dependency manager. Run the following in your terminal to get help on how to use this script: ```shell poetry run python check_commit.py -h ``` """ import argparse import subprocess from colorama import Fore, Style, deinit, init def blue_bold(message: str) -> str: return f'{Fore.BLUE}{Style.BRIGHT}{message}{Style.RESET_ALL}' def light(message: str) -> str: return f'{Style.DIM}{message}{Style.RESET_ALL}' def run_task(task_message: str, command: str) -> None: """Run a task in the shell, defined by a task message and its associated command.""" print(blue_bold(task_message)) print(light(f'$ {command}')) subprocess.call(command, shell=True) print() if __name__ == '__main__': # initialise terminal colors init() # create parser parser = argparse.ArgumentParser( description=( f'Run code quality checks on the given Python files. By default ' f'this script runs isort, Black and Flake8 successively but you ' f'can use the parameters to selectively run some of these checks.' ), epilog=( 'examples:\n' '\n' ' # run all checks on the my_package/ Python package\n' ' $ poetry run python check_commit.py my_package\n' '\n' ' # run Black and Flake8 on the la.py file and the foo/ folder\n' ' $ poetry run python check_commit.py -b -f8 la.py foo\n' ), formatter_class=argparse.RawTextHelpFormatter, ) # add parser arguments parser.add_argument( '-i', '--isort', help='run isort on the given files', action='store_true', ) parser.add_argument( '-b', '--black', help='run Black on the given files', action='store_true', ) parser.add_argument( '-f8', '--flake8', help='run Flake8 on the given files', action='store_true', ) parser.add_argument( 'files', type=str, nargs='+', help='list of files or directories', ) # parse arguments args = parser.parse_args() # run checks run_all_checks = not args.isort and not args.black and not args.flake8 files = ' '.join(args.files) if run_all_checks or args.isort: run_task( 'Run import autosorting with isort...', f'poetry run isort -rc {files}', ) if run_all_checks or args.black: run_task( 'Run code formatting with Black...', f'poetry run black {files}', ) if run_all_checks or args.flake8: run_task( 'Run code linting with Flake8...', f'poetry run flake8 {files}', ) # de-initialise terminal colors deinit()
check_commit.py
2,872
Run a task in the shell, defined by a task message and its associated command. This script runs code quality checks on given Python files. Note: This script assumes you use Poetry as your dependency manager. Run the following in your terminal to get help on how to use this script: ```shell poetry run python check_commit.py -h ``` initialise terminal colors create parser add parser arguments parse arguments run checks de-initialise terminal colors
454
en
0.7549
#!/usr/bin/env python3
# coding=utf8
import socket

from soco import SoCo

# http://docs.python-soco.com/en/latest/getting_started.html


class SpeakerSonos:
    def __init__(self):
        print("SpeakerSonos initialized!")

    def do(self, params):
        speaker = SoCo(socket.gethostbyname(params['host']))
        print(speaker.groups)
        if 'volume' in params:
            speaker.volume = params['volume']
        if 'clear_queue' in params:
            speaker.clear_queue()
        if 'add_playlist_id_to_queue' in params:
            playlist = speaker.get_sonos_playlists()[params['add_playlist_id_to_queue']]
            speaker.add_uri_to_queue(playlist.resources[0].uri)
        if 'switch_to_tv' in params:
            speaker.switch_to_tv()
        if 'next' in params:
            speaker.next()
        elif 'previous' in params:
            speaker.previous()
        if 'play' in params:
            speaker.play()
        elif 'pause' in params:
            speaker.pause()
        if 'set_sleep_timer' in params:
            speaker.set_sleep_timer(params['set_sleep_timer'] * 60)
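A usage sketch for the plugin above; it assumes the class is importable from the path shown below and that a Sonos speaker is reachable on the local network (the hostname is made up for the example).

```python
from plugins.speaker_sonos import SpeakerSonos

speaker = SpeakerSonos()
speaker.do({
    'host': 'living-room-sonos.local',   # resolved via socket.gethostbyname
    'volume': 25,
    'clear_queue': True,
    'add_playlist_id_to_queue': 0,       # first saved Sonos playlist
    'play': True,
})
```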
plugins/speaker_sonos.py
1,115
!/usr/bin/env python3 coding=utf8 http://docs.python-soco.com/en/latest/getting_started.html
92
en
0.175009
import os ''' TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects a single table which has headings in the first row. It converts between these formats and usually keeps information on a round trip between those formats identical. TableData also allows for simple transformations, like dropping a column. CONVENTIONS *cid is column no or column id *rid is row no or row id *cell refers the content of a cell, a cell is represented by cid|rid, as two integers or (not sure yet) a tuple or a list *cname is the column name (in row 0) NOTE * (x|y) not rows x cols * Currently internal cells do have a type, which may be flattened to str if output is type agnostic. * cid and rid begins with 0, so first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient. * interface prefers cname over cid LIMITATIONS Data is stored in memory (in a two dimensional list of lists), so max. size depends on available memory (ram). WHAT NOT TO DO I will NOT allow conversion INTO Excel xsl format, only reading from it. I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets, formatting in Excel, lots of types in Excel. UNICODE I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe. Roundtrip Exceptions *date XML Format made by TableData is <tdx> <row> <cnameA>cell value</cnameA> <cnameB>cell value</cnameB> ... </row> </tdx> The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values. ''' class TableData: def verbose (self, msg): if self._verbose: print (msg) def _uniqueColumns (self): ''' raise exception if column names (cnames) are not unique ''' if len(set(self.table[0])) != len(self.table[0]): raise Exception('Column names not unique') def __init__ (self, ingester, infile, verbose=None): self._verbose=verbose if ingester == 'xml': self.XMLParser(infile) elif ingester == 'xls': self.XLRDParser(infile) elif ingester == 'csv': self.CSVParser(infile) elif ingester == 'json': self.JSONParser(infile) #todo: modern excel else: raise Exception ('Ingester %s not found' % ingester) self._uniqueColumns() # # INGESTERS (xml, csv) # def load_table (path, verbose=None): ''' File extension aware ingester td=TableData.load_table(path) This is an alternative to _init_. Is this pythonic enough? ''' ext=os.path.splitext(path)[1][1:] return TableData (ext, path,verbose) def XLRDParser (self, infile): ''' Parses old excel file into tableData object. Only first sheet. Dont use this directly, use td=TableData('xsl', infile) td=TableData.load=table(infile) instead xlrd uses UTF16. What comes out of here? TO DO: 1. better tests for -Unicode issues not tested -Excel data fields change appearance 2. conversion/transformation stuff ''' import xlrd import xlrd.sheet from xlrd.sheet import ctype_text self.table=[] # will hold sheet in memory as list of list self.verbose ('xlrd infile %s' % infile) #if not os.path.isfile(infile): # raise Exception ('Input file not found') wb = xlrd.open_workbook(filename=infile, on_demand=True) sheet= wb.sheet_by_index(0) #I'm assuming here that first row consist only of text cells? 
#start at r=0 because we want to preserve the columns for r in range(0, sheet.nrows): #no row=[] for c in range(sheet.ncols): cell = sheet.cell(r, c) cellTypeStr = ctype_text.get(cell.ctype, 'unknown type') val=cell.value #convert cell types -> dates look changed, but may not be (seconds since epoch)! if cellTypeStr == "number": val=int(float(val)) elif cellTypeStr == "xldate": val=xlrd.xldate.xldate_as_datetime(val, 0) #Warn if comma -> to check if escaped correctly -> quoting works #if ',' in str(val): # self.verbose ("%i/%i contains a comma" % (c,r) ) row.append(val) self.table.append(row) wb.unload_sheet(0) #unload xlrd sheet to save memory def CSVParser (self,infile): import csv self.table=[] # will hold sheet in memory as list of list self.verbose ('csvParser: ' + str(infile)) with open(infile, mode='r', newline='') as csvfile: incsv = csv.reader(csvfile, dialect='excel') for row in incsv: self.table.append(row) #self.verbose (str(row)) def XMLParser (self,infile): #It is practically impossible to reconstruct the full list of columns from xml file #if xmlWriter leaves out empty elements. Instead, I write them at least for first row. self.table=[] # will hold sheet in memory as list of list; overwrite self.verbose ('xml infile %s' % infile) import xml.etree.ElementTree as ET tree = ET.parse(infile) for row in tree.iter("row"): c=0 cnames=[] col=[] for e in row.iter(): if e.tag !='row': #self.verbose ('%s %s' % (e.tag, e.text)) if len(self.table) == 0: #need to create 2 rows from first row in xml cnames.append(e.tag) col.append(e.text) if len(self.table) == 0: self.table.append(cnames) self.table.append(col) #self.verbose (self.table) def JSONParser (self, infile): self.table=[] # will hold sheet in memory as list of list; overwrite import json self.verbose ('json infile %s' % infile) json_data = open(infile, 'r').read() self.table = json.loads(json_data) ## ## read table data, but NO manipulations ## def ncols(self): ''' Returns integer with number of columns in table data ''' return len(self.table[0]) def nrows (self): ''' Returns integer with number of rows in table data ''' return len(self.table) def cell (self, col,row): ''' Return a cell for col,row. td.cell(col,row) Throws exception if col or row are not integer or out of range. What happens on empty cell? I stick to x|y format, although row|col might be more pythonic. Empty cell is '' not None. ''' try: return self.table[row][col] except: self.verbose ('%i|%i doesnt exist' % (col, row)) exit (1) def cindex (self,needle): ''' Returns the column index (c) for column name 'needle'. Throws 'not in list' if 'needle' is not a column name (cname). ''' return self.table[0].index(needle) def colExists (self, cname): try: self.table[0].index(cname) return True except: return False def search (self, needle): ''' Returns list of cells [cid,rid] that contain the needle. r=td.search(needle) # (1,1) tuples, lists? I am not quite sure! ''' results=[] for rid in range(0, self.nrows()): for cid in range(0, self.ncols()): cell=self.cell(cid, rid) #self.verbose ('ce:'+str(cell)) if str(needle) in str(cell): #self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle)) results.append ((cid,rid)) return results def search_col (self, cname, needle): ''' Returns list/set of rows that contain the needle for the given col. td.search(cname, needle) ''' results=() c=cindex(cname) for rid in range(0, self.nrows()): if needle in self.cell(c,rid): results.append(rid) def show (self): ''' print representation of table Really print? Why not. 
''' for row in self.table: print (row) print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows())) ## ## SIMPLE UNCONDITIONAL TRANSFORMATIONS ## def delRow (self, r): ''' Drop a row by number. Need to remake the index to cover the hole. ''' #r always means rid self.table.pop(r) #print ('row %i deleted' % r) def delCol (self, cname): ''' Drop a column by cname (Not tested.) ''' c=self.cindex (cname) for r in range(0, self.nrows()): self.table[r].pop(c) def addCol (self,name): ''' Add a new column called name at the end of the row. Cells with be empty. Returns the cid of the new column, same as cindex(cname). ''' #update self.table[0].append(name) self._uniqueColumns() for rid in range(1, self.nrows()): self.table[rid].append('') # append empty cells for all rows return len(self.table[0])-1 # len starts counting at 1, but I want 0 def clean_whitespace (self,cname): cid=self.cindex(cname) for rid in range(1, td.nrows()): td.table[rid][cid]=td.table[rid][cid].replace('\r\n', ' ').replace(' ', ' ') ## ## MORE COMPLEX MANIPULATION ## def delCellAIfColBEq (self,cnameA, cnameB, needle): ''' empty cell in column cnameA if value in column cnameB equals needle in every row untested ''' colA=self.cindex(cnameA) colB=self.cindex(cnameB) for rid in range(1, self.nrows()): if self.table[rid][colB] == needle: self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle)) selt.table[rid][colA]='' def delCellAIfColBContains (self,col_a, col_b, needle): pass def delRowIfColContains (self, cname, needle): ''' Delete row if column equals the value 'needle' Should we use cname or c (colId)? ''' #cant loop thru rows and delete one during the loop col=self.cindex(cname) #it appears that excel and xlrd start with 1 #todo: not sure why I have shave off one here! r=self.nrows()-1 while r> 1: #print ('AA%i/%i: ' % (r,col)) cell=self.cell (r, col) if needle in str(cell): #print ('DD:%i/%s:%s' % (r, cname, cell)) #print ('delRowIfColEq: needle %s found in row %i'% (needle, r)) self.delRow(r) r -=1 def delRowIfColEq (self,col, needle): pass def renameCol (self, cnameOld, cnameNew): ''' renames column cnameOld into cnameNew ''' c=self.cindex(cnameOld) self.table[0][c]=cnameNew def default_per_col (cname, default_value): ''' Default Value: if cell is empty replace with default value self.default_per_col ('status', 'filled') ''' cid=td.cindex(cname) for rid in range(1, td.nrows()): if not td.cell (cid,rid): self.table[rid][cid]=default_value ### ### converting to outside world ### def _outTest(self,out): if os.path.exists(out): self.verbose('Output exists already, will be overwritten: %s' %out) def write (self, out): ''' write to file with extension-awareness ''' ext=os.path.splitext(out)[1][1:].lower() if (ext == 'xml'): self.writeXML (out) elif (ext == 'csv'): self.writeCSV (out) elif (ext == 'json'): self.writeJSON (out) else: print ('Format %s not recognized' % ext) def writeCSV (self,outfile): ''' writes data in tableData object to outfile in csv format Values with commas are quoted. 
''' import csv self._outTest(outfile) with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile: out = csv.writer(csvfile, dialect='excel') for r in range(0, self.nrows()): row=self.table[r] out.writerow(row) self.verbose ('csv written to %s' % outfile) def writeXML (self,out): ''' writes table data to file out in xml format ''' import xml.etree.ElementTree as ET from xml.sax.saxutils import escape root = ET.Element("tdx") #table data xml self._outTest(out) def _indent(elem, level=0): i = "\n" + level*" " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: _indent(elem, level+1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i #don't need cnames here, so start at 1, but then write all columns in first row for r in range(1, self.nrows()): doc = ET.SubElement(root, "row") for c in range(0, self.ncols()): cell = self.cell(c,r) #print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell)) #for round trip I need empty cells, at least in the first row if cell or r == 1: ET.SubElement(doc, self.table[0][c]).text=escape(str(cell)) tree = ET.ElementTree(root) _indent(root) tree.write(out, encoding='UTF-8', xml_declaration=True) self.verbose ('xml written to %s' % out) def writeJSON (self, out): ''' Writes table data in json to file out JSON doesn't have date type, hence default=str ''' import json self._outTest(out) f = open(out, 'w') with f as outfile: json.dump(self.table, outfile, default=str) self.verbose ('json written to %s' % out) if __name__ == '__main__': pass
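A short usage sketch for `TableData`, assuming the file above is importable as `TableData`; the CSV content is made up for the example.

```python
import csv

from TableData import TableData

# Write a tiny CSV to ingest.
with open('people.csv', 'w', newline='') as f:
    csv.writer(f, dialect='excel').writerows([
        ['name', 'city', 'status'],
        ['Ada', 'London', ''],
        ['Grace', 'New York', 'filled'],
    ])

td = TableData('csv', 'people.csv')
print(td.ncols(), td.nrows())   # 3 3
print(td.cell(0, 1))            # Ada
print(td.cindex('city'))        # 1
print(td.search('Grace'))       # [(0, 2)]

td.renameCol('city', 'location')
td.addCol('notes')
td.write('people.xml')          # extension-aware writer picks writeXML
```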
TableData.py
15,407
Parses old excel file into tableData object. Only first sheet. Dont use this directly, use td=TableData('xsl', infile) td=TableData.load=table(infile) instead xlrd uses UTF16. What comes out of here? TO DO: 1. better tests for -Unicode issues not tested -Excel data fields change appearance 2. conversion/transformation stuff raise exception if column names (cnames) are not unique Add a new column called name at the end of the row. Cells with be empty. Returns the cid of the new column, same as cindex(cname). Return a cell for col,row. td.cell(col,row) Throws exception if col or row are not integer or out of range. What happens on empty cell? I stick to x|y format, although row|col might be more pythonic. Empty cell is '' not None. Returns the column index (c) for column name 'needle'. Throws 'not in list' if 'needle' is not a column name (cname). Default Value: if cell is empty replace with default value self.default_per_col ('status', 'filled') empty cell in column cnameA if value in column cnameB equals needle in every row untested Drop a column by cname (Not tested.) Drop a row by number. Need to remake the index to cover the hole. Delete row if column equals the value 'needle' Should we use cname or c (colId)? File extension aware ingester td=TableData.load_table(path) This is an alternative to _init_. Is this pythonic enough? Returns integer with number of columns in table data Returns integer with number of rows in table data renames column cnameOld into cnameNew Returns list of cells [cid,rid] that contain the needle. r=td.search(needle) # (1,1) tuples, lists? I am not quite sure! Returns list/set of rows that contain the needle for the given col. td.search(cname, needle) print representation of table Really print? Why not. write to file with extension-awareness writes data in tableData object to outfile in csv format Values with commas are quoted. Writes table data in json to file out JSON doesn't have date type, hence default=str writes table data to file out in xml format todo: modern excel INGESTERS (xml, csv) will hold sheet in memory as list of listif not os.path.isfile(infile): raise Exception ('Input file not found') I'm assuming here that first row consist only of text cells?start at r=0 because we want to preserve the columnsnoconvert cell types -> dates look changed, but may not be (seconds since epoch)!Warn if comma -> to check if escaped correctly -> quoting works if ',' in str(val): self.verbose ("%i/%i contains a comma" % (c,r) ) unload xlrd sheet to save memory will hold sheet in memory as list of listself.verbose (str(row))It is practically impossible to reconstruct the full list of columns from xml fileif xmlWriter leaves out empty elements. Instead, I write them at least for first row. 
will hold sheet in memory as list of list; overwriteself.verbose ('%s %s' % (e.tag, e.text))need to create 2 rows from first row in xmlself.verbose (self.table) will hold sheet in memory as list of list; overwrite read table data, but NO manipulationsself.verbose ('ce:'+str(cell))self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle)) SIMPLE UNCONDITIONAL TRANSFORMATIONS r always means ridprint ('row %i deleted' % r)update append empty cells for all rows len starts counting at 1, but I want 0 MORE COMPLEX MANIPULATIONcant loop thru rows and delete one during the loop it appears that excel and xlrd start with 1todo: not sure why I have shave off one here!print ('AA%i/%i: ' % (r,col))print ('DD:%i/%s:%s' % (r, cname, cell))print ('delRowIfColEq: needle %s found in row %i'% (needle, r)) converting to outside worldtable data xml don't need cnames here, so start at 1, but then write all columns in first row print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))for round trip I need empty cells, at least in the first row
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Prank(Package):
    """A powerful multiple sequence alignment browser."""

    homepage = "http://wasabiapp.org/software/prank/"
    url = "http://wasabiapp.org/download/prank/prank.source.170427.tgz"

    version('170427', sha256='623eb5e9b5cb0be1f49c3bf715e5fabceb1059b21168437264bdcd5c587a8859')

    depends_on('mafft')
    depends_on('exonerate')
    depends_on('bpp-suite')  # for bppancestor

    conflicts('%[email protected]', when='@:150803')

    def install(self, spec, prefix):
        with working_dir('src'):
            filter_file('gcc', '{0}'.format(spack_cc), 'Makefile', string=True)
            filter_file('g++', '{0}'.format(spack_cxx), 'Makefile', string=True)
            if not spec.target.family == 'x86_64':
                filter_file('-m64', '', 'Makefile', string=True)
                filter_file('-pipe', '', 'Makefile', string=True)
            make()
            mkdirp(prefix.bin)
            install('prank', prefix.bin)
var/spack/repos/builtin/packages/prank/package.py
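The install() step in this recipe patches the vendored Makefile so the build uses Spack's compiler wrappers (spack_cc / spack_cxx) and drops x86_64-only flags on other targets. As a rough illustration of what that literal-string substitution amounts to, here is a small stand-alone sketch; patch_makefile and the example paths are hypothetical, not Spack's actual filter_file implementation.

# Minimal stand-in for filter_file(..., string=True) as used in the recipe:
# replace literal strings in a Makefile in place.
def patch_makefile(path, replacements):
    with open(path, 'r', encoding='utf-8') as fh:
        text = fh.read()
    for old, new in replacements.items():
        text = text.replace(old, new)          # literal, not regex, replacement
    with open(path, 'w', encoding='utf-8') as fh:
        fh.write(text)

# e.g. patch_makefile('src/Makefile', {'gcc': '/path/to/spack/env/cc',
#                                      'g++': '/path/to/spack/env/c++'})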
import pytest import math import os import sys module_dir = os.path.dirname(__file__) sys.path.append(os.path.join(module_dir, '..', 'intervalpy')) from intervalpy import Interval def test_intersection(): # closed, closed d1 = Interval(0, 2, start_open=False, end_open=False) d2 = Interval(1, 3, start_open=False, end_open=False) assert d1.contains(0) assert d1.contains(1) assert d1.contains(2) d = Interval.intersection([d1, d2]) assert d.start == 1 assert d.end == 2 assert not d.start_open assert not d.end_open d = Interval.union([d1, d2]) assert d.start == 0 assert d.end == 3 assert not d.start_open assert not d.end_open # closed, open d1 = Interval(0, 2, start_open=False, end_open=False) d2 = Interval(1, 3, start_open=True, end_open=True) d = Interval.intersection([d1, d2]) assert d.start == 1 assert d.end == 2 assert d.start_open assert not d.end_open d = Interval.union([d1, d2]) assert d.start == 0 assert d.end == 3 assert not d.start_open assert d.end_open # open, open d1 = Interval(0, 2, start_open=True, end_open=True) d2 = Interval(1, 3, start_open=True, end_open=True) assert not d1.contains(0) assert d1.contains(1) assert not d1.contains(2) d = Interval.intersection([d1, d2]) assert d.start == 1 assert d.end == 2 assert d.start_open assert d.end_open d = Interval.union([d1, d2]) assert d.start == 0 assert d.end == 3 assert d.start_open assert d.end_open d = Interval.intersection([Interval(0, 1), Interval(2, 3)]) assert d.is_empty d = Interval.intersection([Interval(0, 1, end_open=True), Interval(1, 3, start_open=True)]) assert d.is_empty d = Interval.intersection([Interval(0, 1), Interval.empty()]) assert d.is_empty d = Interval.union([Interval.empty(), 1]) assert d.start == 1 assert d.end == 1 def test_interval_contains_inf(): inf = Interval.infinite() assert inf.contains(math.inf) is True assert inf.contains(-math.inf) is True assert Interval.gte(0).contains(math.inf) is True assert Interval.gte(0).contains(-math.inf) is False assert Interval.lte(0).contains(math.inf) is False assert Interval.lte(0).contains(-math.inf) is True def test_intersection_inf(): assert Interval.intersection([Interval.gte(100), (98, 101)]) == (100, 101) assert Interval.intersection([Interval.point(100), Interval.open_closed(100, 101)]) == Interval.empty() def test_cast(): assert bool(Interval.empty()) is False assert bool(Interval(0, 0)) is True assert list(Interval.empty()) == [] assert list(Interval(0, 0)) == [0, 0] assert list(Interval.open(1, 20)) == [1, 20] def test_intersects(): assert Interval.closed(1, 3).intersects(Interval.closed(2, 3)) assert Interval.closed(1, 3).intersects((2, 3)) assert Interval.closed(1, 3).intersects((1, 3)) assert Interval.closed(1, 3).intersects(Interval.open(1, 3)) assert Interval.closed(1, 3).intersects(Interval.closed(3, 4)) assert not Interval.closed(1, 3).intersects(Interval.open(3, 4)) assert not Interval.open(1, 3).intersects(Interval.closed(3, 4)) assert Interval.point(3).intersects(Interval.closed(3, 4)) assert Interval.point(3).intersects(Interval.closed(1, 3)) assert not Interval.point(3).intersects(Interval.open(3, 4)) assert not Interval.point(3).intersects(Interval.open(1, 3)) assert Interval.closed(1, 3).intersects(Interval.closed(0, 1)) assert not Interval.closed(1, 3).intersects(Interval.open(0, 1)) assert not Interval.open(1, 3).intersects(Interval.closed(0, 1)) assert not Interval.closed(1, 3).intersects(Interval.closed(4, 5)) assert not Interval.closed(1, 3).intersects(Interval.closed(-2, 0)) assert not Interval.closed(1, 
3).intersects(Interval.empty()) assert Interval.closed(1, 3).intersects(Interval.infinite()) assert not Interval.point(1).intersects(Interval.open_closed(1, 2)) def test_parse(): d = Interval.parse(Interval(0, 1, start_open=True, end_open=True)) assert d.start == 0 assert d.end == 1 assert d.start_open assert d.end_open d = Interval.parse((0, 1)) assert d.start == 0 assert d.end == 1 assert not d.start_open assert not d.end_open d = Interval.parse(1) assert d.start == 1 assert d.end == 1 assert not d.start_open assert not d.end_open with pytest.raises(Exception): _ = Interval.parse(None) with pytest.raises(Exception): _ = Interval.parse(None, default_inf=False) assert Interval.parse(None, default_inf=True) == Interval.infinite() d = Interval.parse(math.inf) assert math.isinf(d.start) assert math.isinf(d.end) assert d.start > 0 assert d.end > 0 assert not d.is_negative_infinite assert not d.is_positive_infinite d = Interval.parse(-math.inf) assert math.isinf(d.start) assert math.isinf(d.end) assert d.start < 0 assert d.end < 0 assert not d.is_negative_infinite assert not d.is_positive_infinite d = Interval.parse([]) assert d.is_empty def test_partition(): ds = Interval(1, 3).partition([2]) assert list(map(tuple, ds)) == [(1, 2), (2, 3)] assert not ds[0].start_open assert ds[0].end_open assert not ds[1].start_open assert not ds[1].end_open ds = Interval(0, 3).partition([0, 1, 2, 3, 4], start_open=True) assert list(map(tuple, ds)) == [(0, 0), (0, 1), (1, 2), (2, 3)] assert not ds[0].start_open assert not ds[0].end_open assert ds[1].start_open assert not ds[1].end_open ds = Interval(0, 3).partition([0, 1, 2, 3, 4], start_open=False) assert list(map(tuple, ds)) == [(0, 1), (1, 2), (2, 3), (3, 3)] assert not ds[0].start_open assert ds[0].end_open assert not ds[1].start_open assert ds[1].end_open def test_subset(): d = Interval(1, 3) assert d.is_subset_of((0, 4)) assert d.is_subset_of((1, 3)) assert not d.is_subset_of(Interval.closed_open(1, 3)) assert d.is_superset_of((2, 2)) assert d.is_superset_of((1, 3)) assert d.is_superset_of(Interval.closed_open(1, 3)) def test_equals(): d = Interval(1, 3) assert d.equals((1, 3)) assert not d.equals(None) assert not d.equals(Interval.closed_open(1, 3)) assert Interval.empty().equals(Interval.empty()) # Empty intervals are always equal assert Interval.open(1, 1).equals(Interval.open(2, 2)) assert Interval.infinite().equals(Interval.infinite()) def test_infinite(): assert Interval.gte(math.inf).is_empty is True assert Interval.gte(-math.inf).is_empty is False assert Interval.lte(math.inf).is_empty is False assert Interval.lte(-math.inf).is_empty is True def test_round(): assert Interval(1.2, 3.4).round() == (1, 3) assert Interval(1.2, 3.4).round(method=math.floor) == (1, 3) assert Interval(1.2, 3.4).round(method=math.ceil) == (2, 4) assert Interval.open_closed(1.2, 3.4).round() == Interval.open_closed(1, 3) assert Interval.closed_open(1.2, 3.4).round() == Interval.closed_open(1, 3) assert Interval.empty().round() == Interval.empty() def test_extensions(): d = Interval(1, 3) assert d.get_lte().equals(Interval.lte(3)) assert d.get_gte().equals(Interval.gte(1)) assert d.get_lt().equals(Interval.lt(1)) assert d.get_gt().equals(Interval.gt(3)) d = Interval.open(1, 3) assert d.get_lte().equals(Interval.lt(3)) assert d.get_gte().equals(Interval.gt(1)) assert d.get_lt().equals(Interval.lte(1)) assert d.get_gt().equals(Interval.gte(3)) d = Interval.empty() assert d.get_lte().is_empty assert d.get_gte().is_empty assert d.get_lt().is_empty assert d.get_gt().is_empty 
def test_inequalities(): assert Interval(1, 3) == (1, 3) assert (1, 3) == Interval(1, 3) assert Interval(1, 3) < (4, 6) assert not Interval(1, 3) < (3, 6) assert not Interval(1, 3) < (-3, -1) assert Interval(1, 3) <= (3, 6) assert Interval(1, 3) <= (2, 6) assert Interval(1, 3) <= (1, 6) assert Interval(3, 5) <= (1, 6) assert not Interval(1, 3) <= (-3, -1) assert not Interval(3, 6) <= Interval.open(1, 6) assert Interval(1, 3) < Interval.empty() assert Interval(1, 3) <= Interval.empty() assert Interval(7, 9) > (4, 6) assert not Interval(7, 9) > (4, 7) assert not Interval(7, 9) > (10, 12) assert Interval(7, 9) >= (4, 7) assert Interval(7, 9) >= (4, 8) assert Interval(7, 9) >= (4, 9) assert not Interval(7, 9) >= (10, 12) assert not Interval(4, 10) >= Interval.open(4, 9) assert Interval(7, 9) > Interval.empty() assert Interval(7, 9) >= Interval.empty() def test_arithmetic(): assert Interval(1, 3) + (2, 4) == (1, 4) assert (1, 3) + Interval(2, 4) == (1, 4) assert Interval.open(1, 3) + (2, 4) == Interval.open_closed(1, 4) assert (1, 3) + Interval.open(2, 4) == Interval.closed_open(1, 4)
test/interval_test.py
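The tests above document the behaviour of the Interval type from intervalpy. As a short orientation sketch, the snippet below replays a few of the asserted behaviours; the constructor flags and the intersection/union/parse classmethods are taken from the tests themselves, while the concrete values are merely illustrative.

from intervalpy import Interval

a = Interval(0, 2)                                   # closed [0, 2]
b = Interval(1, 3, start_open=True, end_open=True)   # open (1, 3)

both = Interval.intersection([a, b])                 # (1, 2]: open where either bound was open
either = Interval.union([a, b])                      # [0, 3): keeps the outermost bounds
assert (both.start, both.end) == (1, 2) and both.start_open and not both.end_open

assert Interval.parse((4, 5)) == (4, 5)              # tuples parse to closed intervals
assert Interval.intersection([Interval(0, 1), Interval(2, 3)]).is_empty  # disjoint -> empty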
# coding: utf-8 """ Cherwell REST API Unofficial Python Cherwell REST API library. # noqa: E501 The version of the OpenAPI document: 9.3.2 Contact: See AUTHORS. Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from pycherwell.api_client import ApiClient from pycherwell.exceptions import ( ApiTypeError, ApiValueError ) class TeamsApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def teams_add_user_to_team_by_batch_v1(self, add_user_to_team_by_batch_request, **kwargs): # noqa: E501 """Add users to a team by batch # noqa: E501 Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_by_batch_v1(add_user_to_team_by_batch_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: AddUserToTeamByBatchResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, **kwargs) # noqa: E501 def teams_add_user_to_team_by_batch_v1_with_http_info(self, add_user_to_team_by_batch_request, **kwargs): # noqa: E501 """Add users to a team by batch # noqa: E501 Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(AddUserToTeamByBatchResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = ['add_user_to_team_by_batch_request'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_add_user_to_team_by_batch_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'add_user_to_team_by_batch_request' is set if self.api_client.client_side_validation and ('add_user_to_team_by_batch_request' not in local_var_params or # noqa: E501 local_var_params['add_user_to_team_by_batch_request'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `add_user_to_team_by_batch_request` when calling `teams_add_user_to_team_by_batch_v1`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'add_user_to_team_by_batch_request' in local_var_params: body_params = local_var_params['add_user_to_team_by_batch_request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/addusertoteambybatch', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AddUserToTeamByBatchResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_add_user_to_team_v1(self, add_user_to_team_request, **kwargs): # noqa: E501 """Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v1(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, **kwargs) # noqa: E501 def teams_add_user_to_team_v1_with_http_info(self, add_user_to_team_request, **kwargs): # noqa: E501 """Add a user to a team # noqa: E501 Operation to add a user to a Team. 
To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['add_user_to_team_request'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_add_user_to_team_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'add_user_to_team_request' is set if self.api_client.client_side_validation and ('add_user_to_team_request' not in local_var_params or # noqa: E501 local_var_params['add_user_to_team_request'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v1`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'add_user_to_team_request' in local_var_params: body_params = local_var_params['add_user_to_team_request'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/addusertoteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_add_user_to_team_v2(self, add_user_to_team_request, **kwargs): # noqa: E501 """Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v2(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: AddUserToTeamResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, **kwargs) # noqa: E501 def teams_add_user_to_team_v2_with_http_info(self, add_user_to_team_request, **kwargs): # noqa: E501 """Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use \"Get a user by login ID\" or \"Get a user by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(AddUserToTeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = ['add_user_to_team_request'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_add_user_to_team_v2" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'add_user_to_team_request' is set if self.api_client.client_side_validation and ('add_user_to_team_request' not in local_var_params or # noqa: E501 local_var_params['add_user_to_team_request'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `add_user_to_team_request` when calling `teams_add_user_to_team_v2`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'add_user_to_team_request' in local_var_params: body_params = local_var_params['add_user_to_team_request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V2/addusertoteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='AddUserToTeamResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_delete_team_v1(self, teamid, **kwargs): # noqa: E501 """Delete a Team # noqa: E501 Operation to delete a Team by Team ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_delete_team_v1(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: Specify the Team ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_delete_team_v1_with_http_info(teamid, **kwargs) # noqa: E501 def teams_delete_team_v1_with_http_info(self, teamid, **kwargs): # noqa: E501 """Delete a Team # noqa: E501 Operation to delete a Team by Team ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_delete_team_v1_with_http_info(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: Specify the Team ID. 
(required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['teamid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_delete_team_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'teamid' is set if self.api_client.client_side_validation and ('teamid' not in local_var_params or # noqa: E501 local_var_params['teamid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `teamid` when calling `teams_delete_team_v1`") # noqa: E501 collection_formats = {} path_params = {} if 'teamid' in local_var_params: path_params['teamid'] = local_var_params['teamid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/deleteteam/{teamid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_team_v1(self, teamid, **kwargs): # noqa: E501 """Get a team by its TeamId # noqa: E501 Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use \"Get all available Teams.\" Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_team_v1(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: The Team ID of the Team to get. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_get_team_v1_with_http_info(teamid, **kwargs) # noqa: E501 def teams_get_team_v1_with_http_info(self, teamid, **kwargs): # noqa: E501 """Get a team by its TeamId # noqa: E501 Operation to get Team Info for a single Team using its Team ID. 
To get a Team's internal ID, use \"Get all available Teams.\" Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_team_v1_with_http_info(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: The Team ID of the Team to get. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['teamid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_team_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'teamid' is set if self.api_client.client_side_validation and ('teamid' not in local_var_params or # noqa: E501 local_var_params['teamid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `teamid` when calling `teams_get_team_v1`") # noqa: E501 collection_formats = {} path_params = {} if 'teamid' in local_var_params: path_params['teamid'] = local_var_params['teamid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/getteam/{teamid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_teams_v1(self, **kwargs): # noqa: E501 """Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v1(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:return: TeamsResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_get_teams_v1_with_http_info(**kwargs) # noqa: E501 def teams_get_teams_v1_with_http_info(self, **kwargs): # noqa: E501 """Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v1_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_teams_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/getteams', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_teams_v2(self, **kwargs): # noqa: E501 """Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v2(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True return self.teams_get_teams_v2_with_http_info(**kwargs) # noqa: E501 def teams_get_teams_v2_with_http_info(self, **kwargs): # noqa: E501 """Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v2_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_teams_v2" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V2/getteams', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_users_teams_v1(self, user_record_id, **kwargs): # noqa: E501 """Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v1(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True return self.teams_get_users_teams_v1_with_http_info(user_record_id, **kwargs) # noqa: E501 def teams_get_users_teams_v1_with_http_info(self, user_record_id, **kwargs): # noqa: E501 """Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v1_with_http_info(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['user_record_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_users_teams_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'user_record_id' is set if self.api_client.client_side_validation and ('user_record_id' not in local_var_params or # noqa: E501 local_var_params['user_record_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v1`") # noqa: E501 collection_formats = {} path_params = {} if 'user_record_id' in local_var_params: path_params['userRecordId'] = local_var_params['user_record_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/getusersteams/userrecordid/{userRecordId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_users_teams_v2(self, user_record_id, **kwargs): # noqa: E501 """Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v2(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_get_users_teams_v2_with_http_info(user_record_id, **kwargs) # noqa: E501 def teams_get_users_teams_v2_with_http_info(self, user_record_id, **kwargs): # noqa: E501 """Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use \"Get a user by login ID\" or \"Get a user by public id.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v2_with_http_info(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = ['user_record_id'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_users_teams_v2" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'user_record_id' is set if self.api_client.client_side_validation and ('user_record_id' not in local_var_params or # noqa: E501 local_var_params['user_record_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `user_record_id` when calling `teams_get_users_teams_v2`") # noqa: E501 collection_formats = {} path_params = {} if 'user_record_id' in local_var_params: path_params['userRecordId'] = local_var_params['user_record_id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V2/getusersteams/userrecordid/{userRecordId}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_workgroups_v1(self, **kwargs): # noqa: E501 """Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v1(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_get_workgroups_v1_with_http_info(**kwargs) # noqa: E501 def teams_get_workgroups_v1_with_http_info(self, **kwargs): # noqa: E501 """Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v1_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. 
If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_workgroups_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/getworkgroups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_get_workgroups_v2(self, **kwargs): # noqa: E501 """Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v2(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_get_workgroups_v2_with_http_info(**kwargs) # noqa: E501 def teams_get_workgroups_v2_with_http_info(self, **kwargs): # noqa: E501 """Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v2_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_get_workgroups_v2" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V2/getworkgroups', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamsV2Response', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_remove_customer_from_workgroup_v1(self, workgroupid, customerrecordid, **kwargs): # noqa: E501 """Remove a customer from a Workgroup # noqa: E501 Operation to remove a Customer from a Workgroup. To remove, specify the Workgroup ID and the Customer Record ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_customer_from_workgroup_v1(workgroupid, customerrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str workgroupid: Specify the Workgroup ID. (required) :param str customerrecordid: Specify the Customer record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: RemoveCustomerFromWorkgroupResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_remove_customer_from_workgroup_v1_with_http_info(workgroupid, customerrecordid, **kwargs) # noqa: E501 def teams_remove_customer_from_workgroup_v1_with_http_info(self, workgroupid, customerrecordid, **kwargs): # noqa: E501 """Remove a customer from a Workgroup # noqa: E501 Operation to remove a Customer from a Workgroup. To remove, specify the Workgroup ID and the Customer Record ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_customer_from_workgroup_v1_with_http_info(workgroupid, customerrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str workgroupid: Specify the Workgroup ID. (required) :param str customerrecordid: Specify the Customer record ID. 
(required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(RemoveCustomerFromWorkgroupResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['workgroupid', 'customerrecordid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_remove_customer_from_workgroup_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'workgroupid' is set if self.api_client.client_side_validation and ('workgroupid' not in local_var_params or # noqa: E501 local_var_params['workgroupid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `workgroupid` when calling `teams_remove_customer_from_workgroup_v1`") # noqa: E501 # verify the required parameter 'customerrecordid' is set if self.api_client.client_side_validation and ('customerrecordid' not in local_var_params or # noqa: E501 local_var_params['customerrecordid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `customerrecordid` when calling `teams_remove_customer_from_workgroup_v1`") # noqa: E501 collection_formats = {} path_params = {} if 'workgroupid' in local_var_params: path_params['workgroupid'] = local_var_params['workgroupid'] # noqa: E501 if 'customerrecordid' in local_var_params: path_params['customerrecordid'] = local_var_params['customerrecordid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/removecustomerfromworkgroup/workgroupid/{workgroupid}/customerrecordid/{customerrecordid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='RemoveCustomerFromWorkgroupResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_remove_user_from_team_v1(self, team_id, userrecordid, **kwargs): # noqa: E501 """Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v1(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_remove_user_from_team_v1_with_http_info(team_id, userrecordid, **kwargs) # noqa: E501 def teams_remove_user_from_team_v1_with_http_info(self, team_id, userrecordid, **kwargs): # noqa: E501 """Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v1_with_http_info(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = ['team_id', 'userrecordid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_remove_user_from_team_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'team_id' is set if self.api_client.client_side_validation and ('team_id' not in local_var_params or # noqa: E501 local_var_params['team_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `team_id` when calling `teams_remove_user_from_team_v1`") # noqa: E501 # verify the required parameter 'userrecordid' is set if self.api_client.client_side_validation and ('userrecordid' not in local_var_params or # noqa: E501 local_var_params['userrecordid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `userrecordid` when calling `teams_remove_user_from_team_v1`") # noqa: E501 collection_formats = {} path_params = {} if 'team_id' in local_var_params: path_params['teamId'] = local_var_params['team_id'] # noqa: E501 if 'userrecordid' in local_var_params: path_params['userrecordid'] = local_var_params['userrecordid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/removeuserfromteam/teamid/{teamId}/userrecordid/{userrecordid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_remove_user_from_team_v2(self, team_id, userrecordid, **kwargs): # noqa: E501 """Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v2(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: RemoveUserFromTeamResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True return self.teams_remove_user_from_team_v2_with_http_info(team_id, userrecordid, **kwargs) # noqa: E501 def teams_remove_user_from_team_v2_with_http_info(self, team_id, userrecordid, **kwargs): # noqa: E501 """Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use \"Get a User by login ID\" or \"Get a User by public ID.\" To get a Team's internal ID, use \"Get all available Teams.\" # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v2_with_http_info(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(RemoveUserFromTeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['team_id', 'userrecordid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_remove_user_from_team_v2" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'team_id' is set if self.api_client.client_side_validation and ('team_id' not in local_var_params or # noqa: E501 local_var_params['team_id'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `team_id` when calling `teams_remove_user_from_team_v2`") # noqa: E501 # verify the required parameter 'userrecordid' is set if self.api_client.client_side_validation and ('userrecordid' not in local_var_params or # noqa: E501 local_var_params['userrecordid'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `userrecordid` when calling `teams_remove_user_from_team_v2`") # noqa: E501 collection_formats = {} path_params = {} if 'team_id' in local_var_params: path_params['teamId'] = local_var_params['team_id'] # noqa: E501 if 'userrecordid' in local_var_params: path_params['userrecordid'] = local_var_params['userrecordid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V2/removeuserfromteam/teamid/{teamId}/userrecordid/{userrecordid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='RemoveUserFromTeamResponse', # noqa: E501 auth_settings=auth_settings, 
async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_save_team_member_v1(self, save_team_member_request, **kwargs): # noqa: E501 """Add or Update a team member # noqa: E501 Operation to add or update a Team Member. To add or update, specify User ID, Team ID, and if Team Manager. Optionally, set the Team as the User's default Team. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_member_v1(save_team_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveTeamMemberRequest save_team_member_request: The request object to add or update a Team Member. User recID specifies the User to add or update. TeamId specifies the Team to update. IsTeamManager specifies whether the User is a Team Manager, and SetAsDefaultTeam specifies whether to set this Team as the User's default team. UserRecId, TeamId, and IsTeamManager are required. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: SaveTeamMemberResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_save_team_member_v1_with_http_info(save_team_member_request, **kwargs) # noqa: E501 def teams_save_team_member_v1_with_http_info(self, save_team_member_request, **kwargs): # noqa: E501 """Add or Update a team member # noqa: E501 Operation to add or update a Team Member. To add or update, specify User ID, Team ID, and if Team Manager. Optionally, set the Team as the User's default Team. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_member_v1_with_http_info(save_team_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveTeamMemberRequest save_team_member_request: The request object to add or update a Team Member. User recID specifies the User to add or update. TeamId specifies the Team to update. IsTeamManager specifies whether the User is a Team Manager, and SetAsDefaultTeam specifies whether to set this Team as the User's default team. UserRecId, TeamId, and IsTeamManager are required. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(SaveTeamMemberResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = ['save_team_member_request'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_save_team_member_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'save_team_member_request' is set if self.api_client.client_side_validation and ('save_team_member_request' not in local_var_params or # noqa: E501 local_var_params['save_team_member_request'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `save_team_member_request` when calling `teams_save_team_member_v1`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'save_team_member_request' in local_var_params: body_params = local_var_params['save_team_member_request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/saveteammember', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SaveTeamMemberResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_save_team_v1(self, team_save_request, **kwargs): # noqa: E501 """Create or update a team # noqa: E501 Operation to create or update a Team or Workgroup. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_v1(team_save_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param TeamSaveRequest team_save_request: Request object to create Teams or Workgroups. To create a Team, use teamType and teamName. To update a team, use teamID. Team type values must be User or CustomerWorkgroup. The teamType cannot be changed for existing Teams or Workgroups. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamSaveResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_save_team_v1_with_http_info(team_save_request, **kwargs) # noqa: E501 def teams_save_team_v1_with_http_info(self, team_save_request, **kwargs): # noqa: E501 """Create or update a team # noqa: E501 Operation to create or update a Team or Workgroup. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_v1_with_http_info(team_save_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param TeamSaveRequest team_save_request: Request object to create Teams or Workgroups. To create a Team, use teamType and teamName. To update a team, use teamID. Team type values must be User or CustomerWorkgroup. The teamType cannot be changed for existing Teams or Workgroups. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamSaveResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = ['team_save_request'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_save_team_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'team_save_request' is set if self.api_client.client_side_validation and ('team_save_request' not in local_var_params or # noqa: E501 local_var_params['team_save_request'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `team_save_request` when calling `teams_save_team_v1`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'team_save_request' in local_var_params: body_params = local_var_params['team_save_request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/saveteam', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='TeamSaveResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def teams_save_workgroup_member_v1(self, save_workgroup_member_request, **kwargs): # noqa: E501 """Save the membership status of a Workgroup member. # noqa: E501 Operation to add or update a Workgroup Member. To add or update, specify Customer Record ID, Workgroup ID, and if Workgroup Manager. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_workgroup_member_v1(save_workgroup_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveWorkgroupMemberRequest save_workgroup_member_request: The request object to add or update a Workgroup Member. CustomerRecordId specifies the Customer to add or update. WorkgroupId specifies the Workgroup to update. CustomerIsWorkgroupManager specifies whether the Customer is a Workgroup Manager. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: SaveWorkgroupMemberResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.teams_save_workgroup_member_v1_with_http_info(save_workgroup_member_request, **kwargs) # noqa: E501 def teams_save_workgroup_member_v1_with_http_info(self, save_workgroup_member_request, **kwargs): # noqa: E501 """Save the membership status of a Workgroup member. # noqa: E501 Operation to add or update a Workgroup Member. To add or update, specify Customer Record ID, Workgroup ID, and if Workgroup Manager. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_workgroup_member_v1_with_http_info(save_workgroup_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveWorkgroupMemberRequest save_workgroup_member_request: The request object to add or update a Workgroup Member. CustomerRecordId specifies the Customer to add or update. WorkgroupId specifies the Workgroup to update. CustomerIsWorkgroupManager specifies whether the Customer is a Workgroup Manager. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(SaveWorkgroupMemberResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = ['save_workgroup_member_request'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method teams_save_workgroup_member_v1" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'save_workgroup_member_request' is set if self.api_client.client_side_validation and ('save_workgroup_member_request' not in local_var_params or # noqa: E501 local_var_params['save_workgroup_member_request'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `save_workgroup_member_request` when calling `teams_save_workgroup_member_v1`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'save_workgroup_member_request' in local_var_params: body_params = local_var_params['save_workgroup_member_request'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 return self.api_client.call_api( '/api/V1/saveworkgroupmember', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SaveWorkgroupMemberResponse', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
pycherwell/api/teams_api.py
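For readers working with this generated module, a minimal usage sketch follows. It is not part of the generated file: the Configuration, ApiClient, and TeamsApi names assume the standard OpenAPI Generator Python package layout that pycherwell follows, and the host URL, bearer token, and record IDs are hypothetical placeholders. Because the operations above declare empty auth_settings, the sketch attaches the OAuth token as a default header.

# Minimal usage sketch (assumptions: standard OpenAPI Generator layout for
# pycherwell; the host, token, and IDs below are hypothetical placeholders).
import pycherwell
from pycherwell.rest import ApiException

configuration = pycherwell.Configuration()
configuration.host = "https://cherwell.example.com/CherwellAPI"  # hypothetical host

api_client = pycherwell.ApiClient(configuration)
# auth_settings is empty in the generated operations, so attach the OAuth
# bearer token (obtained separately from Cherwell's token endpoint) manually.
api_client.set_default_header("Authorization", "Bearer <access-token>")

teams_api = pycherwell.TeamsApi(api_client)

try:
    # Synchronous call: list all Workgroups (returns a TeamsV2Response).
    workgroups = teams_api.teams_get_workgroups_v2()
    print(workgroups)

    # Synchronous call with path parameters: remove a Customer from a Workgroup.
    teams_api.teams_remove_customer_from_workgroup_v1(
        "<workgroup-id>", "<customer-record-id>")

    # Asynchronous call: async_req=True returns a thread; .get() blocks for the result.
    thread = teams_api.teams_remove_user_from_team_v2(
        "<team-id>", "<user-record-id>", async_req=True)
    result = thread.get()
    print(result)
except ApiException as exc:
    print("Teams API request failed: %s" % exc)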
NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. Add users to a team by batch # noqa: E501 Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_by_batch_v1(add_user_to_team_by_batch_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: AddUserToTeamByBatchResponse If the method is called asynchronously, returns the request thread. Add users to a team by batch # noqa: E501 Operation to add users to a Team by batch. To get internal IDs for users, use “Get User Information in a Batch.” To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_by_batch_v1_with_http_info(add_user_to_team_by_batch_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamByBatchRequest add_user_to_team_by_batch_request: Request object to specify a list of add user to team request objects. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(AddUserToTeamByBatchResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v1(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. 
Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v1_with_http_info(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v2(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: AddUserToTeamResponse If the method is called asynchronously, returns the request thread. Add a user to a team # noqa: E501 Operation to add a user to a Team. To get the user's internal ID, use "Get a user by login ID" or "Get a user by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_add_user_to_team_v2_with_http_info(add_user_to_team_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param AddUserToTeamRequest add_user_to_team_request: Request object to specify user and team values. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(AddUserToTeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Delete a Team # noqa: E501 Operation to delete a Team by Team ID. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_delete_team_v1(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: Specify the Team ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. Delete a Team # noqa: E501 Operation to delete a Team by Team ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_delete_team_v1_with_http_info(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: Specify the Team ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. Get a team by its TeamId # noqa: E501 Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_team_v1(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: The Team ID of the Team to get. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamResponse If the method is called asynchronously, returns the request thread. Get a team by its TeamId # noqa: E501 Operation to get Team Info for a single Team using its Team ID. To get a Team's internal ID, use "Get all available Teams." Note that TeamType has two possible values, where TeamType = 0 for User (CSM Users), or TeamType = 1 for Workgroup (CSM Customers). # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_team_v1_with_http_info(teamid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str teamid: The Team ID of the Team to get. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. 
If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v1(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread. Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v1_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v2(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread. Get all available Teams # noqa: E501 Operation to get IDs and names for all available Teams. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_teams_v2_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v1(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread. Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v1_with_http_info(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v2(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread. Get Team assignments for a user # noqa: E501 Operation to get Team assignments for a user. To get record IDs, use "Get a user by login ID" or "Get a user by public id." # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_users_teams_v2_with_http_info(user_record_id, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str user_record_id: Specify the user record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v1(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsResponse If the method is called asynchronously, returns the request thread. Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v1_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v2(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamsV2Response If the method is called asynchronously, returns the request thread. Get all available Workgroups # noqa: E501 Operation to get IDs and names for all available Workgroups. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_get_workgroups_v2_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamsV2Response, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Remove a customer from a Workgroup # noqa: E501 Operation to remove a Customer from a Workgroup. To remove, specify the Workgroup ID and the Customer Record ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_customer_from_workgroup_v1(workgroupid, customerrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str workgroupid: Specify the Workgroup ID. (required) :param str customerrecordid: Specify the Customer record ID. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: RemoveCustomerFromWorkgroupResponse If the method is called asynchronously, returns the request thread. Remove a customer from a Workgroup # noqa: E501 Operation to remove a Customer from a Workgroup. To remove, specify the Workgroup ID and the Customer Record ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_customer_from_workgroup_v1_with_http_info(workgroupid, customerrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str workgroupid: Specify the Workgroup ID. (required) :param str customerrecordid: Specify the Customer record ID. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(RemoveCustomerFromWorkgroupResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use "Get a User by login ID" or "Get a User by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v1(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use "Get a User by login ID" or "Get a User by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v1_with_http_info(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: None If the method is called asynchronously, returns the request thread. Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use "Get a User by login ID" or "Get a User by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v2(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: RemoveUserFromTeamResponse If the method is called asynchronously, returns the request thread. Operation to remove a User from a Team. # noqa: E501 Operation to remove a User from a Team. To get the User's record ID, use "Get a User by login ID" or "Get a User by public ID." To get a Team's internal ID, use "Get all available Teams." # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_remove_user_from_team_v2_with_http_info(team_id, userrecordid, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str team_id: Specify the internal ID of the Team. (required) :param str userrecordid: Specify the record ID of the User to remove. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(RemoveUserFromTeamResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Add or Update a team member # noqa: E501 Operation to add or update a Team Member. To add or update, specify User ID, Team ID, and if Team Manager. Optionally, set the Team as the User's default Team. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_member_v1(save_team_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveTeamMemberRequest save_team_member_request: The request object to add or update a Team Member. User recID specifies the User to add or update. TeamId specifies the Team to update. IsTeamManager specifies whether the User is a Team Manager, and SetAsDefaultTeam specifies whether to set this Team as the User's default team. UserRecId, TeamId, and IsTeamManager are required. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: SaveTeamMemberResponse If the method is called asynchronously, returns the request thread. Add or Update a team member # noqa: E501 Operation to add or update a Team Member. To add or update, specify User ID, Team ID, and if Team Manager. Optionally, set the Team as the User's default Team. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_member_v1_with_http_info(save_team_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveTeamMemberRequest save_team_member_request: The request object to add or update a Team Member. User recID specifies the User to add or update. TeamId specifies the Team to update. IsTeamManager specifies whether the User is a Team Manager, and SetAsDefaultTeam specifies whether to set this Team as the User's default team. UserRecId, TeamId, and IsTeamManager are required. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(SaveTeamMemberResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Create or update a team # noqa: E501 Operation to create or update a Team or Workgroup. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_v1(team_save_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param TeamSaveRequest team_save_request: Request object to create Teams or Workgroups. To create a Team, use teamType and teamName. To update a team, use teamID. Team type values must be User or CustomerWorkgroup. The teamType cannot be changed for existing Teams or Workgroups. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: TeamSaveResponse If the method is called asynchronously, returns the request thread. Create or update a team # noqa: E501 Operation to create or update a Team or Workgroup. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_team_v1_with_http_info(team_save_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param TeamSaveRequest team_save_request: Request object to create Teams or Workgroups. To create a Team, use teamType and teamName. To update a team, use teamID. Team type values must be User or CustomerWorkgroup. The teamType cannot be changed for existing Teams or Workgroups. (required) :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(TeamSaveResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. Save the membership status of a Workgroup member. # noqa: E501 Operation to add or update a Workgroup Member. To add or update, specify Customer Record ID, Workgroup ID, and if Workgroup Manager. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_workgroup_member_v1(save_workgroup_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveWorkgroupMemberRequest save_workgroup_member_request: The request object to add or update a Workgroup Member. CustomerRecordId specifies the Customer to add or update. WorkgroupId specifies the Workgroup to update. CustomerIsWorkgroupManager specifies whether the Customer is a Workgroup Manager. (required) :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. 
If one number is provided, it will be the total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: SaveWorkgroupMemberResponse If the method is called asynchronously, returns the request thread. Save the membership status of a Workgroup member. Operation to add or update a Workgroup Member. To add or update, specify Customer Record ID, Workgroup ID, and if Workgroup Manager. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.teams_save_workgroup_member_v1_with_http_info(save_workgroup_member_request, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param SaveWorkgroupMemberRequest save_workgroup_member_request: The request object to add or update a Workgroup Member. CustomerRecordId specifies the Customer to add or update. WorkgroupId specifies the Workgroup to update. CustomerIsWorkgroupManager specifies whether the Customer is a Workgroup Manager. (required) :param _return_http_data_only: return the response data only, without the status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number is provided, it will be the total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(SaveWorkgroupMemberResponse, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread.
Cherwell REST API. Unofficial Python Cherwell REST API library. The version of the OpenAPI document: 9.3.2. Contact: See AUTHORS. Generated by: https://openapi-generator.tech. coding: utf-8. python 2 and python 3 compatibility library.
Inline comments repeated throughout the generated module, one set per endpoint (lint markers omitted): verify the required parameter is set (for 'add_user_to_team_by_batch_request', 'add_user_to_team_request', 'teamid', 'team_id', 'user_record_id', 'userrecordid', 'workgroupid', 'customerrecordid', 'save_team_member_request', 'team_save_request', 'save_workgroup_member_request'); HTTP header `Accept`; HTTP header `Content-Type`; Authentication setting.
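Pulling the generated docstrings above together, a typical client interaction looks like the sketch below. This is a hypothetical usage sketch only: the package name (cherwell_api), the Configuration/ApiClient/TeamsApi class names, and the host URL are placeholder assumptions based on common openapi-generator conventions, and the request objects are assumed to be constructed elsewhere; only the endpoint method names and the async_req / thread.get() pattern come from the docstrings above.

# Hypothetical sketch; cherwell_api, Configuration, ApiClient and TeamsApi are
# placeholder names following openapi-generator conventions, not confirmed by
# the dump above. The request objects are assumed to be built by the caller.
import cherwell_api


def save_team_examples(save_team_member_request, team_save_request,
                       save_workgroup_member_request):
    config = cherwell_api.Configuration(host="https://cherwell.example.com/api")
    with cherwell_api.ApiClient(config) as client:
        api = cherwell_api.TeamsApi(client)

        # Synchronous call: returns a SaveTeamMemberResponse directly.
        member_resp = api.teams_save_team_member_v1(save_team_member_request)

        # Asynchronous call: returns a thread; .get() blocks until the
        # TeamSaveResponse is available.
        thread = api.teams_save_team_v1(team_save_request, async_req=True)
        team_resp = thread.get()

        # *_with_http_info variants return (data, status_code, headers).
        data, status, headers = api.teams_save_workgroup_member_v1_with_http_info(
            save_workgroup_member_request)

        return member_resp, team_resp, data

Either form honours _request_timeout (a single number for the total timeout, or a (connection, read) tuple) and _preload_content as described in the docstrings above.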
46,094
en
0.702382
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" from collections import deque from enum import Enum import logging import optparse import os import pdb import shutil import sys import tempfile import time import traceback from .authproxy import JSONRPCException from . import coverage from .test_node import TestNode from .util import ( MAX_NODES, PortSeed, assert_equal, check_json_precision, connect_nodes_bi, disconnect_nodes, initialize_datadir, log_filename, p2p_port, set_node_times, sync_blocks, sync_mempools, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 BITCOIND_PROC_WAIT_TIMEOUT = 60 class BitcoinTestFramework(object): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the following methods: - __init__() - add_options() - setup_chain() - setup_network() - run_test() The main() method should not be overridden. This class also contains various public and private helper methods.""" # Methods to override in subclass test scripts. def __init__(self): self.num_nodes = 4 self.setup_clean_chain = False self.nodes = [] self.mocktime = 0 def add_options(self, parser): pass def setup_chain(self): self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: self._initialize_chain_clean(self.options.tmpdir, self.num_nodes) else: self._initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir) def setup_network(self): self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. for i in range(self.num_nodes - 1): connect_nodes_bi(self.nodes, i, i + 1) self.sync_all() def setup_nodes(self): extra_args = None if hasattr(self, "extra_args"): extra_args = self.extra_args self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args) def run_test(self): raise NotImplementedError # Main function. This should not be overridden by the subclass test scripts. def main(self): parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"), help="Source directory containing bitcoind/bitcoin-cli (default: %default)") parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs") parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int', help="The seed to use for assigning port numbers (default: current process id)") parser.add_option("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_option("--configfile", dest="configfile", help="Location of the test framework config file") parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") self.add_options(parser) (self.options, self.args) = parser.parse_args() PortSeed.n = self.options.port_seed os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH'] check_json_precision() # Set up temp directory and start logging if self.options.tmpdir: os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix="test") self._start_logging() success = TestStatus.FAILED try: self.setup_chain() self.setup_network() self.run_test() success = TestStatus.PASSED except JSONRPCException as e: self.log.exception("JSONRPC error") except SkipTest as e: self.log.warning("Test Skipped: %s" % e.message) success = TestStatus.SKIPPED except AssertionError as e: self.log.exception("Assertion failed") except KeyError as e: self.log.exception("Key error") except Exception as e: self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt as e: self.log.warning("Exiting after keyboard interrupt") if success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: self.log.info("Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED: self.log.info("Cleaning up") shutil.rmtree(self.options.tmpdir) else: self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) if os.getenv("PYTHON_DEBUG", ""): # Dump the end of the debug logs, to aid in debugging rare # travis failures. import glob filenames = [self.options.tmpdir + "/test_framework.log"] filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log") MAX_LINES_TO_PRINT = 1000 for fn in filenames: try: with open(fn, 'r') as f: print("From", fn, ":") print("".join(deque(f, MAX_LINES_TO_PRINT))) except OSError: print("Opening file %s failed." % fn) traceback.print_exc() if success == TestStatus.PASSED: self.log.info("Tests successful") sys.exit(TEST_EXIT_PASSED) elif success == TestStatus.SKIPPED: self.log.info("Test skipped") sys.exit(TEST_EXIT_SKIPPED) else: self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir) logging.shutdown() sys.exit(TEST_EXIT_FAILED) # Public helper methods. These can be accessed by the subclass test scripts. 
def start_node(self, i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None): """Start a bitcoind and return RPC connection to it""" if extra_args is None: extra_args = [] if binary is None: binary = os.getenv("BITCOIND", "bitcoind") node = TestNode(i, dirname, extra_args, rpchost, timewait, binary, stderr, self.mocktime, coverage_dir=self.options.coveragedir) node.start() node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) return node def start_nodes(self, num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None): """Start multiple bitcoinds, return RPC connections to them""" if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [None] * num_nodes assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) nodes = [] try: for i in range(num_nodes): nodes.append(TestNode(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir)) nodes[i].start() for node in nodes: node.wait_for_rpc_connection() except: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in nodes: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) return nodes def stop_node(self, i): """Stop a bitcoind test node""" self.nodes[i].stop_node() while not self.nodes[i].is_node_stopped(): time.sleep(0.1) def stop_nodes(self): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node() for node in self.nodes: # Wait for nodes to stop while not node.is_node_stopped(): time.sleep(0.1) def assert_start_raises_init_error(self, i, dirname, extra_args=None, expected_msg=None): with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr: try: self.start_node(i, dirname, extra_args, stderr=log_stderr) self.stop_node(i) except Exception as e: assert 'bitcoind exited' in str(e) # node must have shutdown self.nodes[i].running = False self.nodes[i].process = None if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8') if expected_msg not in stderr: raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr) else: if expected_msg is None: assert_msg = "bitcoind should have exited with an error" else: assert_msg = "bitcoind should have exited with expected error " + expected_msg raise AssertionError(assert_msg) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ disconnect_nodes(self.nodes[1], 2) disconnect_nodes(self.nodes[2], 1) self.sync_all([self.nodes[:2], self.nodes[2:]]) def join_network(self): """ Join the (previously split) network halves together. """ connect_nodes_bi(self.nodes, 1, 2) self.sync_all() def sync_all(self, node_groups=None): if not node_groups: node_groups = [self.nodes] for group in node_groups: sync_blocks(group) sync_mempools(group) def enable_mocktime(self): """Enable mocktime for the script. mocktime may be needed for scripts that use the cached version of the blockchain. If the cached version of the blockchain is used without mocktime then the mempools will not sync due to IBD. 
For backwared compatibility of the python scripts with previous versions of the cache, this helper function sets mocktime to Jan 1, 2014 + (201 * 10 * 60)""" self.mocktime = 1388534400 + (201 * 10 * 60) def disable_mocktime(self): self.mocktime = 0 # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self, test_dir, num_nodes, cachedir): """Initialize a pre-mined blockchain for use by the test. Create a cache of a 200-block-long chain (with wallet) for MAX_NODES Afterward, create num_nodes copies from the cache.""" assert num_nodes <= MAX_NODES create_cache = False for i in range(MAX_NODES): if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))): create_cache = True break if create_cache: self.log.debug("Creating data directories from cached datadir") # find and delete old cache directories if any exist for i in range(MAX_NODES): if os.path.isdir(os.path.join(cachedir, "node" + str(i))): shutil.rmtree(os.path.join(cachedir, "node" + str(i))) # Create cache directories, run bitcoinds: for i in range(MAX_NODES): datadir = initialize_datadir(cachedir, i) args = [os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"] if i > 0: args.append("-connect=127.0.0.1:" + str(p2p_port(0))) self.nodes.append(TestNode(i, cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].args = args self.nodes[i].start() # Wait for RPC connections to be ready for node in self.nodes: node.wait_for_rpc_connection() # Create a 200-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of # initialize_chain, only 4 nodes will generate coins. 
# # blocks are created with timestamps 10 minutes apart # starting from 2010 minutes in the past self.enable_mocktime() block_time = self.mocktime - (201 * 10 * 60) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 10 * 60 # Must sync before next peer starts generating blocks sync_blocks(self.nodes) # Shut them down, and clean up cache directories: self.stop_nodes() self.nodes = [] self.disable_mocktime() for i in range(MAX_NODES): os.remove(log_filename(cachedir, i, "debug.log")) os.remove(log_filename(cachedir, i, "db.log")) os.remove(log_filename(cachedir, i, "peers.dat")) os.remove(log_filename(cachedir, i, "fee_estimates.dat")) for i in range(num_nodes): from_dir = os.path.join(cachedir, "node" + str(i)) to_dir = os.path.join(test_dir, "node" + str(i)) shutil.copytree(from_dir, to_dir) initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf def _initialize_chain_clean(self, test_dir, num_nodes): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(num_nodes): initialize_datadir(test_dir, i) class ComparisonTestFramework(BitcoinTestFramework): """Test framework for doing p2p comparison testing Sets up some bitcoind binaries: - 1 binary: test binary - 2 binaries: 1 test binary, 1 ref binary - n>2 binaries: 1 test binary, n-1 ref binaries""" def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = True def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to test") parser.add_option("--refbinary", dest="refbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to use for reference nodes (if any)") def setup_network(self): extra_args = [['-whitelist=127.0.0.1']]*self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.nodes = self.start_nodes( self.num_nodes, self.options.tmpdir, extra_args, binary=[self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)) class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message
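The class above is designed to be subclassed rather than run directly; following the overridable methods listed in its docstring, a minimal functional test looks roughly like the sketch below. The specific RPCs used inside run_test (generate, getblockcount) are ordinary bitcoind calls chosen for illustration and are not taken from the file above.

#!/usr/bin/env python3
# Minimal illustrative subclass of BitcoinTestFramework (a sketch, not part of
# the framework file above).
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class ExampleTest(BitcoinTestFramework):
    def __init__(self):
        super().__init__()
        self.num_nodes = 2              # start two bitcoind instances
        self.setup_clean_chain = True   # begin from an empty regtest chain

    def run_test(self):
        # Mine a block on node 0 and check that node 1 catches up after syncing.
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[0].getblockcount(),
                     self.nodes[1].getblockcount())


if __name__ == '__main__':
    ExampleTest().main()

setup_chain, setup_network and add_options keep their defaults here, so the framework initializes the data directories, connects the nodes in a line, and parses the standard command-line options before run_test executes.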
test/functional/test_framework/test_framework.py
19,453
Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the following methods: - __init__() - add_options() - setup_chain() - setup_network() - run_test() The main() method should not be overridden. This class also contains various public and private helper methods. Test framework for doing p2p comparison testing Sets up some bitcoind binaries: - 1 binary: test binary - 2 binaries: 1 test binary, 1 ref binary - n>2 binaries: 1 test binary, n-1 ref binaries This exception is raised to skip a test Initialize a pre-mined blockchain for use by the test. Create a cache of a 200-block-long chain (with wallet) for MAX_NODES Afterward, create num_nodes copies from the cache. Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization. Enable mocktime for the script. mocktime may be needed for scripts that use the cached version of the blockchain. If the cached version of the blockchain is used without mocktime then the mempools will not sync due to IBD. For backwared compatibility of the python scripts with previous versions of the cache, this helper function sets mocktime to Jan 1, 2014 + (201 * 10 * 60) Join the (previously split) network halves together. Split the network of four nodes into nodes 0/1 and 2/3. Start a bitcoind and return RPC connection to it Start multiple bitcoinds, return RPC connections to them Stop a bitcoind test node Stop multiple bitcoind test nodes Base class for RPC testing. !/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Methods to override in subclass test scripts. Connect the nodes as a "chain". This allows us to split the network between nodes 1 and 2 to get two halves that can work on competing chains. Main function. This should not be overridden by the subclass test scripts. Set up temp directory and start logging Dump the end of the debug logs, to aid in debugging rare travis failures. Public helper methods. These can be accessed by the subclass test scripts. If one node failed to start, stop the others Issue RPC to stop nodes Wait for nodes to stop node must have shutdown Private helper methods. These should not be accessed by the subclass test scripts. Add logger and logging handlers Create file handler to log all messages Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) add the handlers to the logger find and delete old cache directories if any exist Create cache directories, run bitcoinds: Wait for RPC connections to be ready Create a 200-block-long chain; each of the 4 first nodes gets 25 mature blocks and 25 immature. Note: To preserve compatibility with older versions of initialize_chain, only 4 nodes will generate coins. blocks are created with timestamps 10 minutes apart starting from 2010 minutes in the past Must sync before next peer starts generating blocks Shut them down, and clean up cache directories: Overwrite port/rpcport in bitcoin.conf
3,478
en
0.821795
# -*- coding: utf-8 -*-
"""
@author: alex
"""
import numpy as np


def main():
    """Main program execution."""
    n, h1, h2, h3 = generate_ammonia_sites()
    nList = [[1, 2, 3], [0], [0], [0]]
    return [n, h1, h2, h3], nList


def generate_ammonia_sites():
    """Generate the locations for the atoms in the ammonia molecule"""
    x, y = np.array([1., 0., 0.]), np.array([0., 1., 0.])
    # atomic distance (angstroms)
    a = 1.40
    n = np.array([0., 0., 0.])
    h1 = n + a*y
    h2 = n - a*y/2. + a*x*(np.sqrt(3)/2)
    h3 = h2 - a*x*np.sqrt(3)
    return n, h1, h2, h3
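A quick sanity check on the geometry is to compute distances from the returned coordinates: every N-H separation should equal the hard-coded spacing a = 1.40, and the three hydrogens form an equilateral triangle of side a*sqrt(3) in the z = 0 plane (a flat, 120-degree lattice convenience rather than the physical pyramidal ammonia geometry). A small usage sketch, assuming the file is importable as kappa.lattice.ammonia per the path below:

# Usage sketch (assumes the module above is importable as kappa.lattice.ammonia).
import numpy as np
from kappa.lattice.ammonia import main

sites, neighbor_list = main()
n, h1, h2, h3 = sites

for h in (h1, h2, h3):
    print(np.linalg.norm(h - n))    # each N-H distance -> 1.40

print(np.linalg.norm(h1 - h2))      # H-H distance -> 1.40 * sqrt(3) ~ 2.42
print(neighbor_list)                # [[1, 2, 3], [0], [0], [0]]: N bonded to all three H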
kappa/lattice/ammonia.py
626
Generate the locations for the atoms in the ammonia molecule Main program execution. @author: alex -*- coding: utf-8 -*- atomic distance (angstroms)
149
en
0.762434
# -*- coding: utf-8 -*- # an ugly hack to convert some stuff into other stuff... # EDIT THESE ##################################################################### names_to_highlight = ['Eren AM', 'Delmont TO', 'Esen ÖC', 'Lee STM', 'Shaiber A', 'Kiefl E', 'Cui S', 'Watson AR', 'Lolans K'] journal_name_fixes = [('The ISME journal', 'ISME J'), ('Proceedings of the National Academy of Sciences of the United States of America', 'Proc Natl Acad Sci U S A'), ('Proceedings of the National Academy of Sciences', 'Proc Natl Acad Sci U S A'), ('Frontiers in Microbiology', 'Front Microbiol')] keep_pubs_after_year = 2009 ################################################################################## import os import sys from datetime import datetime try: import anvio.utils as u from anvio.errors import ConfigError except: sys.stderr.write("This program requires anvi'o to be installed :/\n") sys.exit(-1) class Publications: def __init__(self, pubs_file_path='pubs.txt', pubs_info_file_path='pubs_info.txt'): """Takes an EndNote library exported a TXT file (`pubs_file_path`), and an optional\ TAB-delimited info file path with DOI identifiers (`pubs_info_file_path`), and\ generates some Markdown formatted output. Here is an info line from the EndNote: Winterberg, K. M., and Reznikoff, W. S. (2007). "Screening transposon mutant libraries using full-genome oligonucleotide microarrays." Methods Enzymol, 421, 110-25. Absolute matching to this format is required. Expected headers in the TAB-delimited pubs info file are 'doi', 'highlights',\ and 'featured_image'. - doi: The DOI of the pub matching to a pubs file path entry. - highlights: Brief bullet points about the work. Each pont must be separated\ from the rest with a ';' character. HTML tags are OK. - featured_image: A URL to an image. If things are not working, feel free to write to meren at uchicago.edu """ self.info = {} self.pubs_dict = {} self.journals_list = [] self.authors_list = [] self.recent_authors_list = [] self.author_links = {} self.pubs_file_path = pubs_file_path self.pubs_info_file_path = pubs_info_file_path def get_author_highlights(self, pub): authors_str = [] for author in pub['authors']: if author in pub['co_first_authors']: author_h = author + '<sup>☯</sup>' elif author in pub['co_senior_authors']: author_h = author + '<sup>‡</sup>' else: author_h = author if author in names_to_highlight: authors_str.append('<span class="pub-member-author">%s</span>' % (author_h)) else: authors_str.append(author_h) return ', '.join(authors_str) def parse_pubs_txt(self): if os.path.exists(self.pubs_info_file_path): self.info = u.get_TAB_delimited_file_as_dictionary(self.pubs_info_file_path) pubs_header = u.get_columns_of_TAB_delim_file(self.pubs_file_path, include_first_column=True) headers_expected = ['Authors', 'Title', 'Publication', 'Volume', 'Number', 'Pages', 'Year', 'doi'] missing_headers = [h for h in pubs_header if h not in headers_expected] if len(missing_headers): raise ConfigError("Sorry, the pubs.txt seems to be missing some of the headers that are mandatory. Each of \ the columns in the following list must be present in this file: %s (hint: yours do not have\ the following: %s)." 
% (', '.join(headers_expected), ', '.join(missing_headers))) self.pubs_txt = u.get_TAB_delimited_file_as_dictionary(self.pubs_file_path, indexing_field=pubs_header.index('doi')) for doi in self.pubs_txt: authors = [] co_first_authors = [] co_senior_authors = [] p = self.pubs_txt[doi] for author in [_.strip() for _ in p['Authors'].split(';')]: if not len(author): continue author_last_name, author_first_name_raw = [_.strip() for _ in author.split(',')] author_first_name = ''.join([n[0] for n in author_first_name_raw.split()]) author_final_name = '%s %s' % (author_last_name, author_first_name) if author_first_name_raw.endswith('*'): co_first_authors.append(author_final_name) elif author_first_name_raw.endswith('+'): co_senior_authors.append(author_final_name) authors.append(author_final_name) if p['Number']: issue = '%s(%s):%s' % (p['Volume'], p['Number'], p['Pages']) else: issue = '%s:%s' % (p['Volume'], p['Pages']) year = p['Year'].strip() pub_entry = {'authors': authors, 'title': p['Title'], 'journal': p['Publication'], 'issue': issue, 'doi': doi, 'year': year, 'co_first_authors': co_first_authors, 'co_senior_authors': co_senior_authors} if year not in self.pubs_dict: self.pubs_dict[year] = [pub_entry] else: self.pubs_dict[year].append(pub_entry) def get_markdown_text_for_pub(self, pub): """Gets a dictionary `pub`, returns a markdown formatted text. An example pub: {'authors': 'McLellan, S. L., and Eren, A. M.', 'doi': '10.1016/j.tim.2014.08.002', 'issue': '22(12), 697-706', 'title': 'Discovering new indicators of fecal pollution.', 'journal': 'Trends Microbiol', 'year': 2014} """ pub_md = [] A = lambda s: pub_md.append(s) A('<div class="pub">') A('''<div class='altmetric-embed' data-badge-type='donut' data-doi="%s"></div>''' % pub['doi']) A('''<div class="__dimensions_badge_embed__" data-doi="%s" data-hide-zero-citations="true" data-legend="hover-bottom" data-style="small_circle"></div>''' % pub['doi']) if pub['doi']: A(' <h3><a href="%s" target="_new">%s</a></h3>' % (' https://doi.org/%s' % (pub['doi']), pub['title'])) else: A(' <h3><a href="http://scholar.google.com/scholar?hl=en&q=%s" target="_new">%s</a></h3>' % ('http://scholar.google.com/scholar?hl=en&q=%s' % (pub['title'].replace(' ', '+')), pub['title'])) A(' <span class="pub-authors">%s</span>' % self.get_author_highlights(pub)) if pub['co_first_authors'] and not pub['co_senior_authors']: A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors</span>') elif pub['co_first_authors'] and pub['co_senior_authors']: A(' <span class="pub-co-first-authors"><sup>☯</sup>Co-first authors; <sup>‡</sup>Co-senior authors</span>') elif pub['co_senior_authors'] and not pub['co_first_authors']: A(' <span class="pub-co-first-authors"><sup>‡</sup>Co-senior authors</span>') if pub['doi'] in self.info: info = self.info[pub['doi']] A(' <div class="%s">' % ('pub-info' if info['featured_image'] else 'pub-info-no-image')) if info['featured_image']: A(' <div class="pub-featured-image">') A(' <a href="%s"><img src="%s" style="max-width: 100px; max-height: 80px; width: auto; border: none; height: auto; margin: 0 auto; display: block; transform: translateY(15%%);"/></a>' % (info['featured_image'], info['featured_image'])) A(' </div>') highlights = info['highlights'].split(';') if info['highlights'] else None if highlights: A(' <div class="%s">' % ('pub-highlights' if info['featured_image'] else 'pub-highlights-no-image')) A(' %s' % '<br>'.join(['<span style="display: inline-block; padding-bottom: 5px;">- %s</span>' % h for h in highlights])) A(' 
</div>') A(' </div>') A(' <span class="pub-journal"><b>%s</b>, %s.</span>' % (pub['journal'], pub['issue'])) A('</div>\n') return '\n'.join(pub_md) def store_markdown_output_for_pubs(self, output_file_path): # years = ''.join(['<a href="#%s"><span class="category-item">%s <small>(%d)</small></span></a>' % (y, y, len(self.pubs_dict[y])) for y in sorted(list(self.pubs_dict.keys()), reverse=True)]) years = ''.join(['<a href="#%s"><span class="category-item">%s</span></a>' % (y, y) for y in sorted(list(self.pubs_dict.keys()), reverse=True)]) output_file = open(output_file_path, 'w') W = lambda s: output_file.write(s + '\n') W('---') W('layout: publications') W('modified: %s' % datetime.today().strftime('%Y-%m-%d')) W('comments: false') W('---\n') W('''<script type='text/javascript' src='https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js'></script>\n''') W('''<script async src="https://badge.dimensions.ai/badge.js" charset="utf-8"></script>\n''') W('<div class="category-box">\n%s\n</div>\n' % years) W('{:.notice}\n') W("This page shows publications that are most reflective of our interests. For a complete list, please see <a href='https://scholar.google.com/citations?user=GtLLuxoAAAAJ&view_op=list_works&sortby=pubdate' target='_blank'>Meren's Google Scholar page</a>.\n") for year in sorted(list(self.pubs_dict.keys()), reverse=True): W('<a name="%s">&nbsp;</a>' % year) W('<h1>%s</h1>\n' % year) for pub in self.pubs_dict[year]: W(self.get_markdown_text_for_pub(pub)) W('') if __name__ == '__main__': pubs = Publications() try: pubs.parse_pubs_txt() pubs.store_markdown_output_for_pubs('publications/index.md') except ConfigError as e: print(e) sys.exit(-1)
pubs.py
10,446
Takes an EndNote library exported as a TXT file (`pubs_file_path`), and an optional TAB-delimited info file path with DOI identifiers (`pubs_info_file_path`), and generates some Markdown formatted output. Here is an info line from the EndNote: Winterberg, K. M., and Reznikoff, W. S. (2007). "Screening transposon mutant libraries using full-genome oligonucleotide microarrays." Methods Enzymol, 421, 110-25. Absolute matching to this format is required. Expected headers in the TAB-delimited pubs info file are 'doi', 'highlights', and 'featured_image'. - doi: The DOI of the pub matching a pubs file entry. - highlights: Brief bullet points about the work. Each point must be separated from the rest with a ';' character. HTML tags are OK. - featured_image: A URL to an image. If things are not working, feel free to write to meren at uchicago.edu Gets a dictionary `pub`, returns a markdown formatted text. An example pub: {'authors': 'McLellan, S. L., and Eren, A. M.', 'doi': '10.1016/j.tim.2014.08.002', 'issue': '22(12), 697-706', 'title': 'Discovering new indicators of fecal pollution.', 'journal': 'Trends Microbiol', 'year': 2014} -*- coding: utf-8 -*- an ugly hack to convert some stuff into other stuff... EDIT THESE years = ''.join(['<a href="%s"><span class="category-item">%s <small>(%d)</small></span></a>' % (y, y, len(self.pubs_dict[y])) for y in sorted(list(self.pubs_dict.keys()), reverse=True)])
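Given the expected headers described above, a pubs_info.txt row would look something like the following (columns are TAB-separated; the DOI is the one from the example pub above, while the highlight text and image URL are made-up placeholders for illustration):

doi	highlights	featured_image
10.1016/j.tim.2014.08.002	First takeaway of the paper;Second takeaway, <b>HTML tags are OK</b>	https://example.org/featured.png

In pubs.txt itself, parse_pubs_txt treats a '*' suffix on an author's first-name field as marking a co-first author and a '+' suffix as marking a co-senior author, which is how the ☯ and ‡ superscripts in the rendered Markdown are produced.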
1,546
en
0.549922
# MIT License # Copyright (c) 2020 Andrew Wells # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. print_LTL_game = True obstacle_cells = [[3,0], [3,1], [3,3], [3,4], [3,5], [3,6], [3,7], [3,8], [5,2], [5,3], [5,4], [5,5], [5,6], [5,7], [5,8], [5,9], [7,0], [7,1], [7,3], [7,4], [7,5], [7,6], [7,7], [7,8]] num_rows = 10 num_cols = 10 probN = 0.69 probE = 0.1 probW = 0.1 probB = 0.01 probS = 0.1 def rc2i_short(row, col): if row < num_rows and row >= 0 and col < num_cols and col >= 0: return row * num_rows + col return -1 def rc2i(row, col): cell = -1 if row < num_rows and row >= 0 and col < num_cols and col >= 0: cell = row * num_rows + col for c in obstacle_cells: if cell == rc2i_short(c[0], c[1]): return -1 return cell def printNorth(row, col): extraProb = 0 str = "[] x={} -> ".format(rc2i(i,j)) if(rc2i(i-1, j) == -1): extraProb += probN else: str = str + " {}:(x'={}) +".format(probN, rc2i(i-1, j)) if(rc2i(i+1, j) == -1): extraProb = extraProb + probB else: str = str + " {}:(x'={}) +".format(probB, rc2i(i+1, j)) if(rc2i(i, j+1) == -1): extraProb = extraProb + probE else: str = str + " {}:(x'={}) +".format(probE, rc2i(i, j+1)) if(rc2i(i, j-1) == -1): extraProb = extraProb + probW else: str = str + " {}:(x'={}) +".format(probW, rc2i(i, j-1)) print(str + " {}:(x'={});".format(probS+extraProb, rc2i(i,j))) def printSouth(row, col): extraProb = 0 str = "[] x={} -> ".format(rc2i(i,j)) if(rc2i(i-1, j) == -1): extraProb += probB else: str = str + " {}:(x'={}) +".format(probB, rc2i(i-1, j)) if(rc2i(i+1, j) == -1): extraProb = extraProb + probN else: str = str + " {}:(x'={}) +".format(probN, rc2i(i+1, j)) if(rc2i(i, j+1) == -1): extraProb = extraProb + probW else: str = str + " {}:(x'={}) +".format(probW, rc2i(i, j+1)) if(rc2i(i, j-1) == -1): extraProb = extraProb + probE else: str = str + " {}:(x'={}) +".format(probE, rc2i(i, j-1)) print(str + " {}:(x'={});".format(probS+extraProb, rc2i(i,j))) def printEast(row, col): extraProb = 0 str = "[] x={} -> ".format(rc2i(i,j)) if(rc2i(i-1, j) == -1): extraProb += probW else: str = str + " {}:(x'={}) +".format(probW, rc2i(i-1, j)) if(rc2i(i+1, j) == -1): extraProb = extraProb + probE else: str = str + " {}:(x'={}) +".format(probE, rc2i(i+1, j)) if(rc2i(i, j+1) == -1): extraProb = extraProb + probN else: str = str + " {}:(x'={}) +".format(probN, rc2i(i, j+1)) if(rc2i(i, j-1) == -1): extraProb = extraProb + probB else: str = str + " {}:(x'={}) +".format(probB, rc2i(i, j-1)) print(str + " {}:(x'={});".format(probS+extraProb, rc2i(i,j))) def printWest(row, col): extraProb = 0 str = 
"[] x={} -> ".format(rc2i(i,j)) if(rc2i(i-1, j) == -1): extraProb += probE else: str = str + " {}:(x'={}) +".format(probE, rc2i(i-1, j)) if(rc2i(i+1, j) == -1): extraProb = extraProb + probW else: str = str + " {}:(x'={}) +".format(probW, rc2i(i+1, j)) if(rc2i(i, j+1) == -1): extraProb = extraProb + probB else: str = str + " {}:(x'={}) +".format(probB, rc2i(i, j+1)) if(rc2i(i, j-1) == -1): extraProb = extraProb + probN else: str = str + " {}:(x'={}) +".format(probN, rc2i(i, j-1)) print(str + " {}:(x'={});".format(probS+extraProb, rc2i(i,j))) print("mdp") print("") print("module M1") print("") if print_LTL_game: print(" x : [0..{}] init 0;".format(num_rows*num_cols)) else: print(" x : [0..{}] init 0;".format(num_rows*num_cols-1)) #print inner cells for i in range (num_rows): for j in range (num_cols): ##Moving north printNorth(i,j) printSouth(i,j) printEast(i,j) printWest(i,j) if print_LTL_game: print("") for i in range (num_rows*num_cols): print("[] x={} -> 1:(x'={});".format(i, num_rows*num_cols)) print("[] x={} -> 1:(x'={});".format(num_rows*num_cols, num_rows*num_cols)) print("") print("endmodule") print("") print("// labels") print("label \"initial\" = (x=0);") print("label \"loca\" = (x=26);") print("label \"locb\" = (x=85);") print("label \"locc\" = (x=16);") print("label \"locd\" = (x=7);") print("label \"loce\" = (x=45);") print("label \"locf\" = (x=91);") print("label \"locg\" = (x=41);") print("label \"loch\" = (x=67);") print("label \"loci\" = (x=20);") print("label \"zbad\" = (x=2);") print("label \"done\" = (x={});".format(num_rows*num_cols))
gridworld_hallways/make_grid_mdp.py
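For orientation, consider the very first command the script above prints: the "north" action from the top-left cell (x=0). Note that the print* helpers read the module-level loop variables i and j rather than their row/col parameters, so they are only meaningful when called from the loop at the bottom of the file. For cell 0 the north and west neighbours fall off the grid, so probN = 0.69 and probW = 0.1 are folded into the stay probability along with probS = 0.1, giving (up to floating-point rounding in the printed values):

[] x=0 ->  0.01:(x'=10) + 0.1:(x'=1) + 0.89:(x'=0);

That is, a 0.01 chance of slipping backwards to cell 10, a 0.1 chance of drifting east to cell 1, and a 0.89 chance of staying put. Since everything is written to stdout as one PRISM mdp module, the script is presumably run as something like `python make_grid_mdp.py > grid.nm` and the resulting file handed to a probabilistic model checker together with a property specification.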
5,746
MIT License Copyright (c) 2020 Andrew Wells Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.print inner cellsMoving north
1,093
en
0.858594
# pylint: disable=redefined-outer-name from .utils import TestCase from .utils import run_tests_assert_success import itertools import os import slash import pytest from .utils.suite_writer import Suite @pytest.mark.parametrize('parametrize', [True, False]) def test_class_name(suite, suite_test, test_type, parametrize): if parametrize: suite_test.add_parameter(num_values=3) summary = suite.run() for result in summary.get_all_results_for_test(suite_test): if test_type == 'method': assert result.test_metadata.class_name.startswith('Test') assert '(' not in result.test_metadata.class_name elif test_type == 'function': assert result.test_metadata.class_name is None else: raise NotImplementedError() # pragma: no cover @pytest.mark.parametrize('parametrize', [True, False]) def test_function_name(suite, suite_test, parametrize): if parametrize: suite_test.add_parameter(num_values=3) summary = suite.run() for result in summary.get_all_results_for_test(suite_test): function_name = result.test_metadata.function_name assert function_name.startswith('test_') assert '.' not in result.test_metadata.function_name assert '(' not in result.test_metadata.function_name def test_variation(suite, suite_test): fixture = suite.slashconf.add_fixture() param = fixture.add_parameter() # pylint: disable=unused-variable suite_test.depend_on_fixture(fixture) suite_test.append_line('slash.context.result.data["variation"] = slash.context.test.__slash__.variation.values.copy()') summary = suite.run() for result in summary.get_all_results_for_test(suite_test): assert len(result.data['variation']) == 1 assert fixture.name not in result.data['variation'] assert '{}.{}'.format(fixture.name, param.name) in result.data['variation'] def test_function_name_with_special_parameters(test_type): suite = Suite() assert len(suite) == 0 # pylint: disable=len-as-condition suite_test = suite.add_test(type=test_type) values = ['a.b', 'a(b'] suite_test.add_parameter(values=values) # we can't verify result because we would not be able to parse the function properly # TODO: this will change once we properly support variations metadata # pylint: disable=fixme summary = suite.run(verify=False, sort=False) for result, value in itertools.zip_longest(summary.session.results, values): function_name = result.test_metadata.function_name assert value not in function_name assert '.' 
not in result.test_metadata.function_name assert '(' not in result.test_metadata.function_name assert function_name.startswith('test_') def test_module_name_not_none_or_empty_string(suite): for result in suite.run().session.results: assert result.test_metadata.module_name def test_test_index(suite): index = None session = suite.run().session for index, result in enumerate(session.results): assert result.test_metadata.test_index0 == index assert result.test_metadata.test_index1 == index + 1 assert index > 0 def test_set_test_name(test_metadata): assert test_metadata.file_path in str(test_metadata) custom_name = 'some_custom_name' test_metadata.set_test_full_name(custom_name) assert str(test_metadata) == '<{}>'.format(custom_name) def test_class_name_with_dot_parameters(): # pylint: disable=unused-argument @slash.parametrize('path', ['x.y']) def test_something(path): pass with slash.Session() as s: # pylint: disable=unused-variable loader = slash.loader.Loader() [test] = loader.get_runnables(test_something) # pylint: disable=unbalanced-tuple-unpacking assert test.__slash__.class_name is None def test_set_file_path(test_metadata): file_path = '/tmp/file_path.py' assert file_path not in test_metadata.address test_metadata.set_file_path(file_path) assert test_metadata.file_path == file_path assert file_path in test_metadata.address def test_mark_interactive(test_metadata): test_metadata.mark_interactive() assert test_metadata.is_interactive() @pytest.fixture def test_metadata(suite, suite_test): return suite.run()[suite_test].test_metadata class TestMetadataTest(TestCase): loaded_tests = [] def setUp(self): @slash.hooks.register def tests_loaded(tests): # pylint: disable=unused-variable TestMetadataTest.loaded_tests = tests super(TestMetadataTest, self).setUp() self.root = self.get_new_path() self.filename = os.path.join(self.root, "testfile.py") with open(self.filename, "w") as f: f.write(_TEST_FILE_TEMPLATE) with slash.Session() as s: self.session = run_tests_assert_success(self.filename, session=s) self.tests = self.loaded_tests self.results = list(self.session.results.iter_test_results()) self.results.sort(key=lambda result: str(result.test_metadata)) def test_tests_have_correct_metadata(self): for test, result in zip(self.tests, self.session.results.iter_test_results()): self.assertIs(test.__slash__, result.test_metadata) def test_simple_test_address(self): self.assertEqual(self.results[0].test_metadata.address, "{}:T001.test_method".format(self.filename)) def test_parameterized_test_address(self): parameterized = set(x.test_metadata.address for x in self.results[1:]) self.assertEqual(parameterized, set( "{0}:T002.test_parameters(after:c={2},b={3},before:a={1})".format(self.filename, a, c, b) for a, b, c in itertools.product([1, 2], [3, 4], [5, 6]))) _TEST_FILE_TEMPLATE = """ import slash class T001(slash.Test): def test_method(self): pass class T002(slash.Test): @slash.parameters.iterate(a=[1, 2]) def before(self, a): pass @slash.parameters.iterate(b=[3, 4]) def test_parameters(self, b): pass @slash.parameters.iterate(c=[5, 6]) def after(self, c): pass """
tests/test_test_metadata.py
6,202
pylint: disable=redefined-outer-name pragma: no cover pylint: disable=unused-variable pylint: disable=len-as-condition we can't verify result because we would not be able to parse the function properly TODO: this will change once we properly support variations metadata pylint: disable=fixme pylint: disable=unused-argument pylint: disable=unused-variable pylint: disable=unbalanced-tuple-unpacking pylint: disable=unused-variable
432
en
0.64426
# Time:  O(n)
# Space: O(1)
#
# You are climbing a staircase. It takes n steps to reach the top.
#
# Each time you can either climb 1 or 2 steps.
# In how many distinct ways can you climb to the top?


class Solution:
    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Iterative Fibonacci-style solution.
        prev, current = 0, 1
        for i in xrange(n):
            prev, current = current, prev + current
        return current

    def climbStairs1(self, n):
        # Naive recursive solution (exponential time); recurses into itself.
        if n == 1:
            return 1
        if n == 2:
            return 2
        return self.climbStairs1(n - 1) + self.climbStairs1(n - 2)


if __name__ == "__main__":
    result = Solution().climbStairs(2)
    print result
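As a quick sanity check, climbStairs(2) — the call in the __main__ block — returns 2 (the orderings 1+1 and 2), and climbStairs(3) returns 3 (1+1+1, 1+2, 2+1); in general the count obeys the Fibonacci recurrence f(n) = f(n-1) + f(n-2), which is exactly what the iterative loop computes.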
Python/climbing-stairs.py
698
Time: O(n) Space: O(1) You are climbing a staircase. It takes n steps to reach the top. Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
191
en
0.948134
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Style testing. # Author: Even Rouault <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2014, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import sys sys.path.append( '../pymod' ) import gdaltest from osgeo import ogr from osgeo import gdal ############################################################################### # # def ogr_style_styletable(): style_table = ogr.StyleTable() style_table.AddStyle("style1_normal", 'SYMBOL(id:"http://style1_normal",c:#67452301)') gdal.PushErrorHandler('CPLQuietErrorHandler') ret = style_table.SaveStyleTable('/nonexistingdir/nonexistingfile') gdal.PopErrorHandler() if ret != 0: gdaltest.post_reason('failure') print(ret) return 'fail' if style_table.SaveStyleTable("/vsimem/out.txt") != 1: gdaltest.post_reason('failure') return 'fail' style_table = None style_table = ogr.StyleTable() gdal.PushErrorHandler('CPLQuietErrorHandler') ret = style_table.LoadStyleTable('/nonexisting') gdal.PopErrorHandler() if ret != 0: gdaltest.post_reason('failure') return 'fail' if style_table.LoadStyleTable('/vsimem/out.txt') != 1: gdaltest.post_reason('failure') return 'fail' gdal.Unlink('/vsimem/out.txt') gdal.PushErrorHandler('CPLQuietErrorHandler') ret = style_table.Find("non_existing_style") gdal.PopErrorHandler() if ret is not None: gdaltest.post_reason('failure') return 'fail' if style_table.Find("style1_normal") != 'SYMBOL(id:"http://style1_normal",c:#67452301)': gdaltest.post_reason('failure') return 'fail' style = style_table.GetNextStyle() if style != 'SYMBOL(id:"http://style1_normal",c:#67452301)': gdaltest.post_reason('failure') return 'fail' style_name = style_table.GetLastStyleName() if style_name != 'style1_normal': gdaltest.post_reason('failure') return 'fail' style = style_table.GetNextStyle() if style is not None: gdaltest.post_reason('failure') return 'fail' style_table.ResetStyleStringReading() style = style_table.GetNextStyle() if style is None: gdaltest.post_reason('failure') return 'fail' # GetStyleTable()/SetStyleTable() on data source ds = ogr.GetDriverByName('Memory').CreateDataSource('') if ds.GetStyleTable() is not None: gdaltest.post_reason('failure') 
return 'fail' ds.SetStyleTable(None) if ds.GetStyleTable() is not None: gdaltest.post_reason('failure') return 'fail' ds.SetStyleTable(style_table) style_table2 = ds.GetStyleTable() style = style_table2.GetNextStyle() if style != 'SYMBOL(id:"http://style1_normal",c:#67452301)': gdaltest.post_reason('failure') return 'fail' # GetStyleTable()/SetStyleTable() on layer lyr = ds.CreateLayer('foo') if lyr.GetStyleTable() is not None: gdaltest.post_reason('failure') return 'fail' lyr.SetStyleTable(None) if lyr.GetStyleTable() is not None: gdaltest.post_reason('failure') return 'fail' lyr.SetStyleTable(style_table) style_table2 = lyr.GetStyleTable() style = style_table2.GetNextStyle() if style != 'SYMBOL(id:"http://style1_normal",c:#67452301)': gdaltest.post_reason('failure') return 'fail' ds = None return 'success' ############################################################################### # Build tests runner gdaltest_list = [ ogr_style_styletable ] if __name__ == '__main__': gdaltest.setup_run( 'ogr_style' ) gdaltest.run_tests( gdaltest_list ) gdaltest.summarize()
autotest/ogr/ogr_style.py
5,137
!/usr/bin/env python -*- coding: utf-8 -*- $Id$ Project: GDAL/OGR Test Suite Purpose: Style testing. Author: Even Rouault <even dot rouault at mines dash paris dot org> Copyright (c) 2014, Even Rouault <even dot rouault at mines-paris dot org> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. GetStyleTable()/SetStyleTable() on data source GetStyleTable()/SetStyleTable() on layer Build tests runner
1,378
en
0.834312
import os.path import time from resource_management.core.exceptions import Fail from resource_management.core.source import Template from resource_management.core.source import StaticFile from resource_management.core.source import DownloadSource from resource_management.core.resources import Execute from resource_management.core.resources.system import Directory from resource_management.core.resources.system import File from resource_management.libraries.functions import get_user_call_output from resource_management.libraries.functions import format from resource_management.libraries.functions.show_logs import show_logs from resource_management.libraries.functions.security_commons import update_credential_provider_path from resource_management.libraries.resources.xml_config import XmlConfig from resource_management.core.logger import Logger from resource_management.libraries.script.config_dictionary import UnknownConfiguration import beacon_utils from resource_management.libraries.script import Script import ranger_api_functions def install_beacon(): import params Directory([params.etc_prefix_dir], owner=params.beacon_user, group=params.user_group, mode=0755, create_parents=True) if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists( params.install_dir): Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir) Execute('rm -rf %s' % params.install_dir) Execute( 'wget ' + params.download_url + ' -O /tmp/' + params.filename, user=params.beacon_user) Execute('tar -zxf /tmp/' + params.filename + ' -C ' + Script.get_stack_root()) Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir) Execute(' cp -r ' + params.install_dir + '/conf/* ' + params.etc_prefix_dir) Execute(' rm -rf ' + params.install_dir + '/conf') Execute('ln -s ' + params.etc_prefix_dir + ' ' + params.install_dir + '/conf') Execute('chown -R %s:%s %s/%s' % (params.beacon_user, params.user_group, params.stack_root, params.version_dir)) Execute('chown -R %s:%s %s' % (params.beacon_user, params.user_group, params.install_dir)) Execute('/bin/rm -f /tmp/' + params.filename) def beacon(type, action=None, upgrade_type=None): import params if action == 'config': create_directory(params.beacon_home_dir) create_directory(params.beacon_plugin_staging_dir) cloud_cred_provider = params.beacon_cloud_cred_provider_dir.split('://')[1] cloud_cred_parts = cloud_cred_provider.split('/', 1) create_directory("/" + cloud_cred_parts[1], cloud_cred_parts[0]) if params.is_hive_installed: if not isinstance(params.hive_repl_cmrootdir, UnknownConfiguration): beacon_utils.create_hdfs_directory(params.hive_repl_cmrootdir, params.hive_user, 01777) if not isinstance(params.hive_repl_rootdir, UnknownConfiguration): beacon_utils.create_hdfs_directory(params.hive_repl_rootdir, params.hive_user, 0700) Directory(params.beacon_pid_dir, owner=params.beacon_user, create_parents=True, mode=0755, cd_access="a", ) Directory(params.beacon_data_dir, owner=params.beacon_user, create_parents=True, mode=0755, cd_access="a", ) Directory(params.beacon_log_dir, owner=params.beacon_user, create_parents=True, mode=0755, cd_access="a", ) Directory(params.beacon_webapp_dir, owner=params.beacon_user, create_parents=True) Directory(params.beacon_home, owner=params.beacon_user, create_parents=True) Directory(params.etc_prefix_dir, mode=0755, create_parents=True) Directory(params.beacon_conf_dir, owner=params.beacon_user, create_parents=True) environment_dictionary = { "HADOOP_HOME": 
params.hadoop_home_dir, "JAVA_HOME": params.java_home, "BEACON_LOG_DIR": params.beacon_log_dir, "BEACON_PID_DIR": params.beacon_pid_dir, "BEACON_DATA_DIR": params.beacon_data_dir, "BEACON_CLUSTER": params.beacon_cluster_name, "HADOOP_CONF": params.hadoop_conf_dir } pid = get_user_call_output.get_user_call_output(format("cat {server_pid_file}"), user=params.beacon_user, is_checked_call=False)[1] process_exists = format("ls {server_pid_file} && ps -p {pid}") if type == 'server': if action == 'start': try: if params.credential_store_enabled: if 'hadoop.security.credential.provider.path' in params.beacon_env: credential_provider_path = params.beacon_env['hadoop.security.credential.provider.path'] credential_provider_src_path = credential_provider_path[len('jceks://file'):] File(params.beacon_credential_provider_path[len('jceks://file'):], owner=params.beacon_user, group=params.user_group, mode=0640, content=StaticFile(credential_provider_src_path) ) else: Logger.error( "hadoop.security.credential.provider.path property not found in beacon-env config-type") File(os.path.join(params.beacon_conf_dir, 'beacon.yml'), owner='root', group='root', mode=0644, content=Template("beacon.yml.j2") ) params.beacon_security_site = update_credential_provider_path( params.beacon_security_site, 'beacon-security-site', os.path.join(params.beacon_conf_dir, 'beacon-security-site.jceks'), params.beacon_user, params.user_group ) XmlConfig("beacon-security-site.xml", conf_dir=params.beacon_conf_dir, configurations=params.beacon_security_site, configuration_attributes=params.config['configuration_attributes']['beacon-security-site'], owner=params.beacon_user, group=params.user_group, mode=0644 ) Execute(format('{beacon_home}/bin/beacon setup'), user=params.beacon_user, path=params.hadoop_bin_dir, environment=environment_dictionary ) if params.download_mysql_driver: download_mysql_driver() Execute(format('{beacon_home}/bin/beacon start'), user=params.beacon_user, path=params.hadoop_bin_dir, environment=environment_dictionary, not_if=process_exists, ) if params.has_ranger_admin: ranger_admin_url = params.config['configurations']['admin-properties']['policymgr_external_url'] ranger_admin_user = params.config['configurations']['ranger-env']['admin_username'] ranger_admin_passwd = params.config['configurations']['ranger-env']['admin_password'] if not params.security_enabled: # Creating/Updating beacon.ranger.user with role "ROLE_SYS_ADMIN" response_user = ranger_api_functions.get_user(ranger_admin_url, params.beacon_ranger_user, format( "{ranger_admin_user}:{ranger_admin_passwd}")) if response_user is not None and response_user['name'] == params.beacon_ranger_user: response_user_role = response_user['userRoleList'][0] Logger.info(format( "Beacon Ranger User with username {beacon_ranger_user} exists with role {response_user_role}")) if response_user_role != "ROLE_SYS_ADMIN": response_user_role = ranger_api_functions.update_user_role(ranger_admin_url, params.beacon_ranger_user, "ROLE_SYS_ADMIN", format( "{ranger_admin_user}:{ranger_admin_passwd}")) else: response_code = ranger_api_functions.create_user(ranger_admin_url, params.beacon_ranger_user, params.beacon_ranger_password, "ROLE_SYS_ADMIN", format( "{ranger_admin_user}:{ranger_admin_passwd}")) # Updating beacon_user role depending upon cluster environment count = 0 while count < 10: beacon_user_get = ranger_api_functions.get_user(ranger_admin_url, params.beacon_user, format( "{ranger_admin_user}:{ranger_admin_passwd}")) if beacon_user_get is not None: break else: 
time.sleep(10) # delay for 10 seconds count = count + 1 Logger.error( format('Retrying to fetch {beacon_user} user from Ranger Admin for {count} time(s)')) if beacon_user_get is not None and beacon_user_get['name'] == params.beacon_user: beacon_user_get_role = beacon_user_get['userRoleList'][0] if params.security_enabled and beacon_user_get_role != "ROLE_SYS_ADMIN": beacon_service_user = ranger_api_functions.update_user_role(ranger_admin_url, params.beacon_user, "ROLE_SYS_ADMIN", format( "{ranger_admin_user}:{ranger_admin_passwd}")) elif not params.security_enabled and beacon_user_get_role != "ROLE_USER": beacon_service_user = ranger_api_functions.update_user_role(ranger_admin_url, params.beacon_user, "ROLE_USER", format( "{ranger_admin_user}:{ranger_admin_passwd}")) if params.ranger_hive_plugin_enabled: # Get Ranger Hive default policy for resource database, table, column response_policy = ranger_api_functions.get_ranger_service_default_policy(ranger_admin_url, params.service_name, format( "{ranger_admin_user}:{ranger_admin_passwd}"), ['database', 'table', 'column']) if response_policy: user_present = ranger_api_functions.check_user_policy(response_policy, params.beacon_user) if not user_present and beacon_user_get is not None and beacon_user_get[ 'name'] == params.beacon_user: policy_id = response_policy['id'] beacon_user_policy_item = {'groups': [], 'conditions': [], 'users': [params.beacon_user], 'accesses': [{'isAllowed': True, 'type': 'all'}, {'isAllowed': True, 'type': 'repladmin'}], 'delegateAdmin': False} policy_data = ranger_api_functions.update_policy_item(response_policy, beacon_user_policy_item) update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, policy_id, policy_data, format( "{ranger_admin_user}:{ranger_admin_passwd}")) # Get Ranger Hive default policy for resource hiveservice response_policy = ranger_api_functions.get_ranger_service_default_policy(ranger_admin_url, params.service_name, format( "{ranger_admin_user}:{ranger_admin_passwd}"), ['hiveservice']) if response_policy: user_present = ranger_api_functions.check_user_policy(response_policy, params.beacon_user) if not user_present and beacon_user_get is not None and beacon_user_get[ 'name'] == params.beacon_user: # Updating beacon_user in Ranger Hive default policy for resource hiveservice policy_id = response_policy['id'] beacon_user_policy_item = {'groups': [], 'conditions': [], 'users': [params.beacon_user], 'accesses': [{'isAllowed': True, 'type': 'serviceadmin'}], 'delegateAdmin': False} policy_data = ranger_api_functions.update_policy_item(response_policy, beacon_user_policy_item) update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, policy_id, policy_data, format( "{ranger_admin_user}:{ranger_admin_passwd}")) if params.ranger_atlas_plugin_enabled: # Creating beacon.atlas.user with role "ROLE_USER" beacon_atlas_user_response = ranger_api_functions.get_user(ranger_admin_url, params.beacon_atlas_user, format( "{ranger_admin_user}:{ranger_admin_passwd}")) if beacon_atlas_user_response is not None and beacon_atlas_user_response[ 'name'] == params.beacon_atlas_user: beacon_atlas_user_role = beacon_atlas_user_response['userRoleList'][0] Logger.info(format( "Beacon Atlas User with username {beacon_atlas_user} exists with role {beacon_atlas_user_role}")) else: beacon_atlas_user_create_response_code = ranger_api_functions.create_user(ranger_admin_url, params.beacon_atlas_user, params.beacon_atlas_password, "ROLE_USER", format( "{ranger_admin_user}:{ranger_admin_passwd}")) 
if params.security_enabled: get_beacon_atlas_user = params.beacon_user else: get_beacon_atlas_user = params.beacon_atlas_user if params.is_stack_3_0_or_further: # Get Ranger Atlas default policy for ENTITY TYPE, ENTITY CLASSIFICATION and ENTITY ID resource atlas_entity_policy_response = ranger_api_functions.get_ranger_service_default_policy( ranger_admin_url, params.ranger_atlas_service_name, format("{ranger_admin_user}:{ranger_admin_passwd}"), ['entity', 'entity-classification', 'entity-type']) if atlas_entity_policy_response: beacon_atlas_user_present = ranger_api_functions.check_user_policy( atlas_entity_policy_response, get_beacon_atlas_user) if not beacon_atlas_user_present: # Updating beacon atlas user in Ranger Atlas default policy for entity resource atlas_entity_policy_id = atlas_entity_policy_response['id'] beacon_atlas_user_policy_item = {'groups': [], 'conditions': [], 'users': [get_beacon_atlas_user], 'accesses': [ {'type': 'entity-read', 'isAllowed': True}, {'type': 'entity-create', 'isAllowed': True}, {'type': 'entity-update', 'isAllowed': True}]} atlas_entity_policy_data = ranger_api_functions.update_policy_item( atlas_entity_policy_response, beacon_atlas_user_policy_item) atlas_update_entity_policy_response = ranger_api_functions.update_policy( ranger_admin_url, atlas_entity_policy_id, atlas_entity_policy_data, format("{ranger_admin_user}:{ranger_admin_passwd}")) # Get Ranger Atlas default policy for ATLAS SERVICE resource atlas_service_policy_response = ranger_api_functions.get_ranger_service_default_policy( ranger_admin_url, params.ranger_atlas_service_name, format("{ranger_admin_user}:{ranger_admin_passwd}"), ['atlas-service']) if atlas_service_policy_response: beacon_atlas_user_present = ranger_api_functions.check_user_policy( atlas_service_policy_response, get_beacon_atlas_user) if not beacon_atlas_user_present: # Updating beacon atlas user in Ranger Atlas default policy for service resource atlas_service_policy_id = atlas_service_policy_response['id'] beacon_atlas_user_policy_item = {'groups': [], 'conditions': [], 'users': [get_beacon_atlas_user], 'accesses': [ {'type': 'admin-export', 'isAllowed': True}, {'type': 'admin-import', 'isAllowed': True}]} atlas_service_policy_data = ranger_api_functions.update_policy_item( atlas_service_policy_response, beacon_atlas_user_policy_item) atlas_service_policy_update_response = ranger_api_functions.update_policy( ranger_admin_url, atlas_service_policy_id, atlas_service_policy_data, format("{ranger_admin_user}:{ranger_admin_passwd}")) # Get Ranger Atlas default policy for TYPE CATEGORY and TYPE resource atlas_type_category_policy_response = ranger_api_functions.get_ranger_service_default_policy( ranger_admin_url, params.ranger_atlas_service_name, format("{ranger_admin_user}:{ranger_admin_passwd}"), ['type', 'type-category']) if atlas_type_category_policy_response: beacon_atlas_user_present = ranger_api_functions.check_user_policy( atlas_type_category_policy_response, get_beacon_atlas_user) if not beacon_atlas_user_present: # Updating beacon atlas user in Ranger Atlas default policy for type category and type resource atlas_type_category_policy_id = atlas_type_category_policy_response['id'] beacon_atlas_user_policy_item = {'groups': [], 'conditions': [], 'users': [get_beacon_atlas_user], 'accesses': [ {'type': 'type-create', 'isAllowed': True}, {'type': 'type-update', 'isAllowed': True}, {'type': 'type-delete', 'isAllowed': True}]} atlas_type_category_policy_data = ranger_api_functions.update_policy_item( 
atlas_type_category_policy_response, beacon_atlas_user_policy_item) atlas_update_type_category_policy_response = ranger_api_functions.update_policy( ranger_admin_url, atlas_type_category_policy_id, atlas_type_category_policy_data, format("{ranger_admin_user}:{ranger_admin_passwd}")) else: # Get Ranger Atlas default policy for ENTITY resource atlas_policy_response = ranger_api_functions.get_ranger_service_default_policy( ranger_admin_url, params.ranger_atlas_service_name, format("{ranger_admin_user}:{ranger_admin_passwd}"), ['entity']) if atlas_policy_response: beacon_atlas_user_present = ranger_api_functions.check_user_policy( atlas_policy_response, get_beacon_atlas_user) if not beacon_atlas_user_present: # Updating beacon atlas user in Ranger Atlas default policy for entity resource atlas_policy_id = atlas_policy_response['id'] beacon_atlas_user_policy_item = {'groups': [], 'conditions': [], 'users': [get_beacon_atlas_user], 'accesses': [{'type': 'read', 'isAllowed': True}, {'type': 'create', 'isAllowed': True}, {'type': 'update', 'isAllowed': True}, {'type': 'delete', 'isAllowed': True}, {'type': 'all', 'isAllowed': True}]} atlas_policy_data = ranger_api_functions.update_policy_item(atlas_policy_response, beacon_atlas_user_policy_item) atlas_update_policy_response = ranger_api_functions.update_policy(ranger_admin_url, atlas_policy_id, atlas_policy_data, format( "{ranger_admin_user}:{ranger_admin_passwd}")) # Get Ranger Atlas default policy for OPERATION resource atlas_operation_policy_response = ranger_api_functions.get_ranger_service_default_policy( ranger_admin_url, params.ranger_atlas_service_name, format("{ranger_admin_user}:{ranger_admin_passwd}"), ['operation']) if atlas_operation_policy_response: beacon_atlas_user_present = ranger_api_functions.check_user_policy( atlas_operation_policy_response, get_beacon_atlas_user) if not beacon_atlas_user_present: # Updating beacon atlas user in Ranger Atlas default policy for operation resource atlas_operation_policy_id = atlas_operation_policy_response['id'] beacon_atlas_user_policy_item = {'groups': [], 'conditions': [], 'users': [get_beacon_atlas_user], 'accesses': [{'type': 'read', 'isAllowed': True}, {'type': 'create', 'isAllowed': True}, {'type': 'update', 'isAllowed': True}, {'type': 'delete', 'isAllowed': True}, {'type': 'all', 'isAllowed': True}]} atlas_operation_policy_data = ranger_api_functions.update_policy_item( atlas_operation_policy_response, beacon_atlas_user_policy_item) atlas_operation_policy_update_response = ranger_api_functions.update_policy( ranger_admin_url, atlas_operation_policy_id, atlas_operation_policy_data, format("{ranger_admin_user}:{ranger_admin_passwd}")) except Exception as e: show_logs(params.beacon_log_dir, params.beacon_user) if action == 'stop': try: Execute(format('{beacon_home}/bin/beacon stop'), user=params.beacon_user, path=params.hadoop_bin_dir, environment=environment_dictionary) except: show_logs(params.beacon_log_dir, params.beacon_user) File(params.server_pid_file, action='delete') def create_directory(directory, scheme=None): import params if (scheme is None or scheme == ''): if params.is_hdfs_installed: scheme = 'hdfs' else: scheme = 'file' Logger.info("Creating directory {0}:/{1}".format(scheme, directory)) if scheme == 'file': Directory(directory, owner=params.beacon_user, create_parents=True, mode=0755, cd_access="a") elif scheme == 'hdfs': beacon_utils.create_hdfs_directory(directory, params.beacon_user, 0775) params.HdfsResource(None, action="execute") def download_mysql_driver(): import 
params if params.jdbc_jar_name is None: raise Fail("Mysql JDBC driver not installed on ambari-server") File( params.mysql_driver_target, content=DownloadSource(params.driver_source), mode=0644 )
stacks/XIAOMATECH/1.0/services/BEACON/package/scripts/beacon.py
30,656
Creating/Updating beacon.ranger.user with role "ROLE_SYS_ADMIN" Updating beacon_user role depending upon cluster environment delay for 10 seconds Get Ranger Hive default policy for resource database, table, column Get Ranger Hive default policy for resource hiveservice Updating beacon_user in Ranger Hive default policy for resource hiveservice Creating beacon.atlas.user with role "ROLE_USER" Get Ranger Atlas default policy for ENTITY TYPE, ENTITY CLASSIFICATION and ENTITY ID resource Updating beacon atlas user in Ranger Atlas default policy for entity resource Get Ranger Atlas default policy for ATLAS SERVICE resource Updating beacon atlas user in Ranger Atlas default policy for service resource Get Ranger Atlas default policy for TYPE CATEGORY and TYPE resource Updating beacon atlas user in Ranger Atlas default policy for type category and type resource Get Ranger Atlas default policy for ENTITY resource Updating beacon atlas user in Ranger Atlas default policy for entity resource Get Ranger Atlas default policy for OPERATION resource Updating beacon atlas user in Ranger Atlas default policy for operation resource
1,132
en
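install_beacon() above drives a download/extract/symlink install through Ambari's resource_management resources (Execute, Directory). As a rough standalone illustration of that same flow, here is a minimal sketch in plain Python; the function name, URL and directory arguments are placeholders, not the script's real values.

# Hypothetical standalone sketch of the download/extract/symlink flow that
# install_beacon() performs via Ambari's resource_management API above.
# The URL and directory arguments are placeholders.
import os
import tarfile
import urllib.request

def install_tarball(download_url, stack_root, version_dir, install_dir):
    """Download a release tarball, unpack it under stack_root, and point
    install_dir at the unpacked version via a symlink."""
    archive = os.path.join("/tmp", os.path.basename(download_url))
    urllib.request.urlretrieve(download_url, archive)   # wget <url> -O /tmp/<file>
    with tarfile.open(archive, "r:gz") as tar:
        tar.extractall(stack_root)                      # tar -zxf /tmp/<file> -C <stack_root>
    if os.path.islink(install_dir):
        os.remove(install_dir)                          # drop a stale symlink if present
    os.symlink(os.path.join(stack_root, version_dir), install_dir)  # ln -s <versioned dir> <install dir>
    os.remove(archive)                                  # /bin/rm -f /tmp/<file>

# Example with made-up values:
# install_tarball("https://example.com/beacon-1.0.tar.gz", "/opt/stack", "beacon-1.0", "/opt/beacon")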
0.501861
import pandas as pd
import matplotlib.pyplot as plt
import statistics as s

# your raw data obtained from world bank
data = pd.read_csv("C:/Users/user/Documents/API_NY.GDP.PCAP.CD_DS2_en_csv_v2_1068945.csv")

fulldataonly = data.dropna()
listofcountry = list(fulldataonly['Country Name'])


def findcountryrow(country):
    # find which row is the country
    for i in range(len(data['Country Name'])):
        if data['Country Name'][i] == country:
            return i
    print("error, country not found")


listyear = list(range(1960, 2018))
x = []
y = []
mydata = []

# for country in range(len(listofcountry)):
#     for year in listyear:
#         y0 = data.loc[findcountryrow(listofcountry[country]), str(year)]
#         y1 = data.loc[findcountryrow(listofcountry[country]), str(year + 1)]
#         delta = (y1 - y0) / y0
#         x.append(y0)
#         y.append(delta)
#         mydata.append([y0, delta])

fulllistofcountry = list(data['Country Name'])

# collect (GDP per capita, year-on-year growth %) pairs for every country/year
# where both years have data
for country in range(len(fulllistofcountry)):
    for year in listyear:
        if pd.notnull(data.loc[country, str(year)]) and pd.notnull(data.loc[country, str(year + 1)]):
            y0 = data.loc[country, str(year)]
            y1 = data.loc[country, str(year + 1)]
            delta = ((y1 - y0) / y0) * 100
            x.append(y0)
            y.append(delta)
            mydata.append([y0, delta])

mydata.sort(key=lambda pair: pair[0])

# average the sorted pairs in consecutive groups of `naverage` points
count = 0
GDP, myGDP = [], []
Growth, myGrowth = [], []
mysd = []
naverage = 500
averagedatax, averagedatay = [], []
for i in range(len(mydata)):
    if count < naverage:
        GDP.append(mydata[i][0])
        Growth.append(mydata[i][1])
        count += 1
    if count == naverage:
        myGDP = s.mean(GDP)
        myGrowth = s.mean(Growth)
        mysd.append(s.stdev(Growth))
        averagedatax.append(myGDP)
        averagedatay.append(myGrowth)
        count = 0
        GDP = []
        Growth = []
    if i == len(mydata) - 1 and len(Growth) > 1:
        # handle the final, partially filled group (needs at least 2 points for stdev)
        myGDP = s.mean(GDP)
        myGrowth = s.mean(Growth)
        mysd.append(s.stdev(Growth))
        averagedatax.append(myGDP)
        averagedatay.append(myGrowth)

plt.xscale('log')
plt.xlim(100, 200000)
plt.xlabel('GDP per capita in US dollar', size=15)
plt.ylabel('GDP growth rate %', size=15)
plt.title('Dependence of Economic Growth Rate with GDP per capita', size=15)
plt.scatter(averagedatax, averagedatay)

# histogram = mydata[0:1800]
# per = []
# for gdp, percentage in histogram:
#     per.append(percentage)
# plt.xlim(-50, 60)
# plt.xlabel('GDP per capita Growth %', size=15)
# plt.ylabel('Density Function', size=15)
# plt.title('Economic Growth for different countries for 1960-2018', size=15)
# plt.hist(x=per, bins='auto', density=True)
Economic Growth & GDP per capita.py
2,722
your raw data obtained from world bank find which row is the countryfor country in range(len(listofcountry)): for year in listyear: y0=data.loc[findcountryrow(listofcountry[country]),str(year)] y1=data.loc[findcountryrow(listofcountry[country]),str(year+1)] delta=(y1-y0)/y0 x.append(y0) y.append(delta) mydata.append([y0,delta]) histogram=mydata[0:1800] per=[] for gdp, percentage in histogram: per.append(percentage) plt.xlim(-50,60) plt.xlabel('GDP per capita Growth %',size=15) plt.ylabel('Density Function',size=15) plt.title('Economic Growth for different countries for 1960-2018', size=15) plt.hist(x=per, bins='auto', density=True)
688
en
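The loop above sorts the (GDP per capita, growth %) pairs and averages them in fixed groups of 500 points. The same binning can be done with pandas alone; the sketch below assumes the mydata list built by the script and uses quantile bins, so the number of bins is a free parameter rather than a fixed group size.

# Pandas-only sketch of the "sort by GDP, average per bin" step; mydata is
# the list of [gdp, growth_pct] pairs assembled by the script above.
import pandas as pd

def binned_growth(mydata, n_bins=60):
    df = pd.DataFrame(mydata, columns=["gdp", "growth_pct"])
    df["bin"] = pd.qcut(df["gdp"], q=n_bins, duplicates="drop")   # equal-count GDP bins
    out = df.groupby("bin", observed=True).agg(
        gdp=("gdp", "mean"),
        growth=("growth_pct", "mean"),
        growth_sd=("growth_pct", "std"),
    )
    return out.reset_index(drop=True)

# Usage: binned = binned_growth(mydata); plt.scatter(binned["gdp"], binned["growth"])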
0.274347
# Monster Hot Air Balloon | (2435553)

if sm.getSkillByItem() == 0:  # Check whether the item has a vehicleID stored; 0 if it does not.
    sm.chat("An Error occurred whilst trying to find the mount.")
elif sm.hasSkill(sm.getSkillByItem()):
    sm.chat("You already have the 'Monster Hot Air Balloon' mount.")
else:
    sm.consumeItem()
    sm.giveSkill(sm.getSkillByItem())
    sm.chat("Successfully added the 'Monster Hot Air Balloon' mount.")

sm.dispose()
scripts/item/consume_2435553.py
450
Monster Hot Air Balloon | (2435553) Check whether item has an vehicleID stored, 0 if false.
94
en
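The item script above only touches a handful of calls on the server's sm script manager. A hypothetical stand-in for that interface, enough to exercise the script's three branches outside the game server, could look like the sketch below; the class and the skill id are invented for illustration.

# Made-up mock of the sm script-manager interface used by consume_2435553.py.
class FakeScriptManager:
    def __init__(self, item_skill_id, owned_skills=()):
        self._item_skill_id = item_skill_id   # 0 would mean "no vehicleID stored on the item"
        self._owned = set(owned_skills)
        self.messages = []

    def getSkillByItem(self):
        return self._item_skill_id

    def hasSkill(self, skill_id):
        return skill_id in self._owned

    def consumeItem(self):
        self.messages.append("<item consumed>")

    def giveSkill(self, skill_id):
        self._owned.add(skill_id)

    def chat(self, text):
        self.messages.append(text)

    def dispose(self):
        self.messages.append("<disposed>")

# Running the item script's if/elif/else block against
#     sm = FakeScriptManager(item_skill_id=80001033)   # placeholder skill id
# should land in the final branch and record the "Successfully added..." message.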
0.488993
# this works around a path issue with just calling
# coverage run -m doctest -v <rst-file>
import doctest
import sys

fails = 0

for filename in [
    "tuples.rst",
    "functions.rst",
    "symbolic.rst",
    "simplification.rst",
    "differentiation.rst",
    "symbolic_tuples.rst",
]:
    result = doctest.testfile(filename)
    fails += result.failed

if fails:
    sys.exit(1)
run_doctests.py
384
this works around a path issue with just calling coverage run -m doctest -v <rst-file>
86
en
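doctest.testfile() re-runs the interactive examples embedded in each listed .rst file and reports how many failed, which is what the loop above aggregates. A small self-contained illustration, with a made-up snippet and filename, follows.

# Write a tiny reST file containing one doctest example, then check it the same
# way run_doctests.py does. The filename is illustrative.
import doctest

rst_example = """
A doctest embedded in reST prose::

    >>> 1 + 1
    2
"""

with open("example_snippet.rst", "w") as f:
    f.write(rst_example)

result = doctest.testfile("example_snippet.rst", module_relative=False)
print(result.attempted, result.failed)   # -> 1 0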
0.916185
""" The various serializers. Pyro - Python Remote Objects. Copyright by Irmen de Jong ([email protected]). """ import array import builtins import uuid import logging import struct import datetime import decimal import numbers import inspect import marshal import json import serpent import msgpack from . import errors __all__ = ["SerializerBase", "SerpentSerializer", "JsonSerializer", "MarshalSerializer", "MsgpackSerializer", "serializers", "serializers_by_id"] log = logging.getLogger("Pyro5.serializers") if '-' in serpent.__version__: ver = serpent.__version__.split('-', 1)[0] else: ver = serpent.__version__ ver = tuple(map(int, ver.split("."))) if ver < (1, 27): raise RuntimeError("requires serpent 1.27 or better") if msgpack.version < (0, 5, 2): raise RuntimeError("requires msgpack 0.5.2 or better") all_exceptions = {} for name, t in vars(builtins).items(): if type(t) is type and issubclass(t, BaseException): all_exceptions[name] = t for name, t in vars(errors).items(): if type(t) is type and issubclass(t, errors.PyroError): all_exceptions[name] = t def pyro_class_serpent_serializer(obj, serializer, stream, level): # Override the default way that a Pyro URI/proxy/daemon is serialized. # Because it defines a __getstate__ it would otherwise just become a tuple, # and not be deserialized as a class. d = SerializerBase.class_to_dict(obj) serializer.ser_builtins_dict(d, stream, level) def serialize_pyro_object_to_dict(obj): return { "__class__": "{:s}.{:s}".format(obj.__module__, obj.__class__.__name__), "state": obj.__getstate__() } class SerializerBase(object): """Base class for (de)serializer implementations (which must be thread safe)""" serializer_id = 0 # define uniquely in subclass __custom_class_to_dict_registry = {} __custom_dict_to_class_registry = {} def loads(self, data): raise NotImplementedError("implement in subclass") def loadsCall(self, data): raise NotImplementedError("implement in subclass") def dumps(self, data): raise NotImplementedError("implement in subclass") def dumpsCall(self, obj, method, vargs, kwargs): raise NotImplementedError("implement in subclass") @classmethod def register_type_replacement(cls, object_type, replacement_function): raise NotImplementedError("implement in subclass") def _convertToBytes(self, data): if type(data) is bytearray: return bytes(data) if type(data) is memoryview: return data.tobytes() return data @classmethod def register_class_to_dict(cls, clazz, converter, serpent_too=True): """Registers a custom function that returns a dict representation of objects of the given class. The function is called with a single parameter; the object to be converted to a dict.""" cls.__custom_class_to_dict_registry[clazz] = converter if serpent_too: try: def serpent_converter(obj, serializer, stream, level): d = converter(obj) serializer.ser_builtins_dict(d, stream, level) serpent.register_class(clazz, serpent_converter) except errors.ProtocolError: pass @classmethod def unregister_class_to_dict(cls, clazz): """Removes the to-dict conversion function registered for the given class. Objects of the class will be serialized by the default mechanism again.""" if clazz in cls.__custom_class_to_dict_registry: del cls.__custom_class_to_dict_registry[clazz] try: serpent.unregister_class(clazz) except errors.ProtocolError: pass @classmethod def register_dict_to_class(cls, classname, converter): """ Registers a custom converter function that creates objects from a dict with the given classname tag in it. 
The function is called with two parameters: the classname and the dictionary to convert to an instance of the class. """ cls.__custom_dict_to_class_registry[classname] = converter @classmethod def unregister_dict_to_class(cls, classname): """ Removes the converter registered for the given classname. Dicts with that classname tag will be deserialized by the default mechanism again. """ if classname in cls.__custom_dict_to_class_registry: del cls.__custom_dict_to_class_registry[classname] @classmethod def class_to_dict(cls, obj): """ Convert a non-serializable object to a dict. Partly borrowed from serpent. """ for clazz in cls.__custom_class_to_dict_registry: if isinstance(obj, clazz): return cls.__custom_class_to_dict_registry[clazz](obj) if type(obj) in (set, dict, tuple, list): # we use a ValueError to mirror the exception type returned by serpent and other serializers raise ValueError("can't serialize type " + str(obj.__class__) + " into a dict") if hasattr(obj, "_pyroDaemon"): obj._pyroDaemon = None if isinstance(obj, BaseException): # special case for exceptions return { "__class__": obj.__class__.__module__ + "." + obj.__class__.__name__, "__exception__": True, "args": obj.args, "attributes": vars(obj) # add custom exception attributes } try: value = obj.__getstate__() except AttributeError: pass else: if isinstance(value, dict): return value try: value = dict(vars(obj)) # make sure we can serialize anything that resembles a dict value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__ return value except TypeError: if hasattr(obj, "__slots__"): # use the __slots__ instead of the vars dict value = {} for slot in obj.__slots__: value[slot] = getattr(obj, slot) value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__ return value else: raise errors.SerializeError("don't know how to serialize class " + str(obj.__class__) + " using serializer " + str(cls.__name__) + ". Give it vars() or an appropriate __getstate__") @classmethod def dict_to_class(cls, data): """ Recreate an object out of a dict containing the class name and the attributes. Only a fixed set of classes are recognized. """ from . 
import core, client, server # XXX circular classname = data.get("__class__", "<unknown>") if isinstance(classname, bytes): classname = classname.decode("utf-8") if classname in cls.__custom_dict_to_class_registry: converter = cls.__custom_dict_to_class_registry[classname] return converter(classname, data) if "__" in classname: raise errors.SecurityError("refused to deserialize types with double underscores in their name: " + classname) # for performance, the constructors below are hardcoded here instead of added on a per-class basis to the dict-to-class registry if classname == "Pyro5.core.URI": uri = core.URI.__new__(core.URI) uri.__setstate__(data["state"]) return uri elif classname == "Pyro5.client.Proxy": proxy = client.Proxy.__new__(client.Proxy) proxy.__setstate__(data["state"]) return proxy elif classname == "Pyro5.server.Daemon": daemon = server.Daemon.__new__(server.Daemon) daemon.__setstate__(data["state"]) return daemon elif classname.startswith("Pyro5.util."): if classname == "Pyro5.util.SerpentSerializer": return SerpentSerializer() elif classname == "Pyro5.util.MarshalSerializer": return MarshalSerializer() elif classname == "Pyro5.util.JsonSerializer": return JsonSerializer() elif classname == "Pyro5.util.MsgpackSerializer": return MsgpackSerializer() elif classname.startswith("Pyro5.errors."): errortype = getattr(errors, classname.split('.', 2)[2]) if issubclass(errortype, errors.PyroError): return SerializerBase.make_exception(errortype, data) elif classname == "struct.error": return SerializerBase.make_exception(struct.error, data) elif classname == "Pyro5.core._ExceptionWrapper": ex = data["exception"] if isinstance(ex, dict) and "__class__" in ex: ex = SerializerBase.dict_to_class(ex) return core._ExceptionWrapper(ex) elif data.get("__exception__", False): if classname in all_exceptions: return SerializerBase.make_exception(all_exceptions[classname], data) # python 2.x: exceptions.ValueError # python 3.x: builtins.ValueError # translate to the appropriate namespace... 
namespace, short_classname = classname.split('.', 1) if namespace in ("builtins", "exceptions"): exceptiontype = getattr(builtins, short_classname) if issubclass(exceptiontype, BaseException): return SerializerBase.make_exception(exceptiontype, data) elif namespace == "sqlite3" and short_classname.endswith("Error"): import sqlite3 exceptiontype = getattr(sqlite3, short_classname) if issubclass(exceptiontype, BaseException): return SerializerBase.make_exception(exceptiontype, data) log.warning("unsupported serialized class: " + classname) raise errors.SerializeError("unsupported serialized class: " + classname) @staticmethod def make_exception(exceptiontype, data): ex = exceptiontype(*data["args"]) if "attributes" in data: # restore custom attributes on the exception object for attr, value in data["attributes"].items(): setattr(ex, attr, value) return ex def recreate_classes(self, literal): t = type(literal) if t is set: return {self.recreate_classes(x) for x in literal} if t is list: return [self.recreate_classes(x) for x in literal] if t is tuple: return tuple(self.recreate_classes(x) for x in literal) if t is dict: if "__class__" in literal: return self.dict_to_class(literal) result = {} for key, value in literal.items(): result[key] = self.recreate_classes(value) return result return literal def __eq__(self, other): """this equality method is only to support the unit tests of this class""" return isinstance(other, SerializerBase) and vars(self) == vars(other) def __ne__(self, other): return not self.__eq__(other) __hash__ = object.__hash__ class SerpentSerializer(SerializerBase): """(de)serializer that wraps the serpent serialization protocol.""" serializer_id = 1 # never change this def dumpsCall(self, obj, method, vargs, kwargs): return serpent.dumps((obj, method, vargs, kwargs), module_in_classname=True) def dumps(self, data): return serpent.dumps(data, module_in_classname=True) def loadsCall(self, data): obj, method, vargs, kwargs = serpent.loads(data) vargs = self.recreate_classes(vargs) kwargs = self.recreate_classes(kwargs) return obj, method, vargs, kwargs def loads(self, data): return self.recreate_classes(serpent.loads(data)) @classmethod def register_type_replacement(cls, object_type, replacement_function): def custom_serializer(obj, serpent_serializer, outputstream, indentlevel): replaced = replacement_function(obj) if replaced is obj: serpent_serializer.ser_default_class(replaced, outputstream, indentlevel) else: serpent_serializer._serialize(replaced, outputstream, indentlevel) if object_type is type or not inspect.isclass(object_type): raise ValueError("refusing to register replacement for a non-type or the type 'type' itself") serpent.register_class(object_type, custom_serializer) @classmethod def dict_to_class(cls, data): if data.get("__class__") == "float": return float(data["value"]) # serpent encodes a float nan as a special class dict like this return super(SerpentSerializer, cls).dict_to_class(data) class MarshalSerializer(SerializerBase): """(de)serializer that wraps the marshal serialization protocol.""" serializer_id = 2 # never change this def dumpsCall(self, obj, method, vargs, kwargs): vargs = [self.convert_obj_into_marshallable(value) for value in vargs] kwargs = {key: self.convert_obj_into_marshallable(value) for key, value in kwargs.items()} return marshal.dumps((obj, method, vargs, kwargs)) def dumps(self, data): return marshal.dumps(self.convert_obj_into_marshallable(data)) def loadsCall(self, data): data = self._convertToBytes(data) obj, method, vargs, 
kwargs = marshal.loads(data) vargs = self.recreate_classes(vargs) kwargs = self.recreate_classes(kwargs) return obj, method, vargs, kwargs def loads(self, data): data = self._convertToBytes(data) return self.recreate_classes(marshal.loads(data)) def convert_obj_into_marshallable(self, obj): marshalable_types = (str, int, float, type(None), bool, complex, bytes, bytearray, tuple, set, frozenset, list, dict) if isinstance(obj, array.array): if obj.typecode == 'c': return obj.tostring() if obj.typecode == 'u': return obj.tounicode() return obj.tolist() if isinstance(obj, marshalable_types): return obj return self.class_to_dict(obj) @classmethod def class_to_dict(cls, obj): if isinstance(obj, uuid.UUID): return str(obj) return super(MarshalSerializer, cls).class_to_dict(obj) @classmethod def register_type_replacement(cls, object_type, replacement_function): pass # marshal serializer doesn't support per-type hooks class JsonSerializer(SerializerBase): """(de)serializer that wraps the json serialization protocol.""" serializer_id = 3 # never change this __type_replacements = {} def dumpsCall(self, obj, method, vargs, kwargs): data = {"object": obj, "method": method, "params": vargs, "kwargs": kwargs} data = json.dumps(data, ensure_ascii=False, default=self.default) return data.encode("utf-8") def dumps(self, data): data = json.dumps(data, ensure_ascii=False, default=self.default) return data.encode("utf-8") def loadsCall(self, data): data = self._convertToBytes(data).decode("utf-8") data = json.loads(data) vargs = self.recreate_classes(data["params"]) kwargs = self.recreate_classes(data["kwargs"]) return data["object"], data["method"], vargs, kwargs def loads(self, data): data = self._convertToBytes(data).decode("utf-8") return self.recreate_classes(json.loads(data)) def default(self, obj): replacer = self.__type_replacements.get(type(obj), None) if replacer: obj = replacer(obj) if isinstance(obj, set): return tuple(obj) # json module can't deal with sets so we make a tuple out of it if isinstance(obj, uuid.UUID): return str(obj) if isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, decimal.Decimal): return str(obj) if isinstance(obj, array.array): if obj.typecode == 'c': return obj.tostring() if obj.typecode == 'u': return obj.tounicode() return obj.tolist() return self.class_to_dict(obj) @classmethod def register_type_replacement(cls, object_type, replacement_function): if object_type is type or not inspect.isclass(object_type): raise ValueError("refusing to register replacement for a non-type or the type 'type' itself") cls.__type_replacements[object_type] = replacement_function class MsgpackSerializer(SerializerBase): """(de)serializer that wraps the msgpack serialization protocol.""" serializer_id = 4 # never change this __type_replacements = {} def dumpsCall(self, obj, method, vargs, kwargs): return msgpack.packb((obj, method, vargs, kwargs), use_bin_type=True, default=self.default) def dumps(self, data): return msgpack.packb(data, use_bin_type=True, default=self.default) def loadsCall(self, data): return msgpack.unpackb(self._convertToBytes(data), raw=False, object_hook=self.object_hook) def loads(self, data): return msgpack.unpackb(self._convertToBytes(data), raw=False, object_hook=self.object_hook, ext_hook=self.ext_hook) def default(self, obj): replacer = self.__type_replacements.get(type(obj), None) if replacer: obj = replacer(obj) if isinstance(obj, set): return tuple(obj) # msgpack module can't deal with sets so we make a tuple out of it 
if isinstance(obj, uuid.UUID): return str(obj) if isinstance(obj, bytearray): return bytes(obj) if isinstance(obj, complex): return msgpack.ExtType(0x30, struct.pack("dd", obj.real, obj.imag)) if isinstance(obj, datetime.datetime): if obj.tzinfo: raise errors.SerializeError("msgpack cannot serialize datetime with timezone info") return msgpack.ExtType(0x32, struct.pack("d", obj.timestamp())) if isinstance(obj, datetime.date): return msgpack.ExtType(0x33, struct.pack("l", obj.toordinal())) if isinstance(obj, decimal.Decimal): return str(obj) if isinstance(obj, numbers.Number): return msgpack.ExtType(0x31, str(obj).encode("ascii")) # long if isinstance(obj, array.array): if obj.typecode == 'c': return obj.tostring() if obj.typecode == 'u': return obj.tounicode() return obj.tolist() return self.class_to_dict(obj) def object_hook(self, obj): if "__class__" in obj: return self.dict_to_class(obj) return obj def ext_hook(self, code, data): if code == 0x30: real, imag = struct.unpack("dd", data) return complex(real, imag) if code == 0x31: return int(data) if code == 0x32: return datetime.datetime.fromtimestamp(struct.unpack("d", data)[0]) if code == 0x33: return datetime.date.fromordinal(struct.unpack("l", data)[0]) raise errors.SerializeError("invalid ext code for msgpack: " + str(code)) @classmethod def register_type_replacement(cls, object_type, replacement_function): if object_type is type or not inspect.isclass(object_type): raise ValueError("refusing to register replacement for a non-type or the type 'type' itself") cls.__type_replacements[object_type] = replacement_function """The various serializers that are supported""" serializers = { "serpent": SerpentSerializer(), "marshal": MarshalSerializer(), "json": JsonSerializer(), "msgpack": MsgpackSerializer() } """The available serializers by their internal id""" serializers_by_id = {ser.serializer_id: ser for ser in serializers.values()}
Pyro5/serializers.py
20,446
(de)serializer that wraps the json serialization protocol. (de)serializer that wraps the marshal serialization protocol. (de)serializer that wraps the msgpack serialization protocol. Base class for (de)serializer implementations (which must be thread safe) (de)serializer that wraps the serpent serialization protocol. this equality method is only to support the unit tests of this class Convert a non-serializable object to a dict. Partly borrowed from serpent. Recreate an object out of a dict containing the class name and the attributes. Only a fixed set of classes are recognized. Registers a custom function that returns a dict representation of objects of the given class. The function is called with a single parameter; the object to be converted to a dict. Registers a custom converter function that creates objects from a dict with the given classname tag in it. The function is called with two parameters: the classname and the dictionary to convert to an instance of the class. Removes the to-dict conversion function registered for the given class. Objects of the class will be serialized by the default mechanism again. Removes the converter registered for the given classname. Dicts with that classname tag will be deserialized by the default mechanism again. The various serializers. Pyro - Python Remote Objects. Copyright by Irmen de Jong ([email protected]). Override the default way that a Pyro URI/proxy/daemon is serialized. Because it defines a __getstate__ it would otherwise just become a tuple, and not be deserialized as a class. define uniquely in subclass we use a ValueError to mirror the exception type returned by serpent and other serializers special case for exceptions add custom exception attributes make sure we can serialize anything that resembles a dict use the __slots__ instead of the vars dict XXX circular for performance, the constructors below are hardcoded here instead of added on a per-class basis to the dict-to-class registry python 2.x: exceptions.ValueError python 3.x: builtins.ValueError translate to the appropriate namespace... restore custom attributes on the exception object never change this serpent encodes a float nan as a special class dict like this never change this marshal serializer doesn't support per-type hooks never change this json module can't deal with sets so we make a tuple out of it never change this msgpack module can't deal with sets so we make a tuple out of it long
2,456
en
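The register_class_to_dict / register_dict_to_class hooks defined on SerializerBase above are the supported way to teach these serializers about custom classes. Below is a usage sketch, assuming Pyro5 and serpent are installed; the Point class and the "example.Point" tag are invented for the example.

# Round-trip a custom class through the serpent serializer using the hooks above.
from Pyro5.serializers import SerializerBase, serializers

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def point_to_dict(obj):
    return {"__class__": "example.Point", "x": obj.x, "y": obj.y}

def dict_to_point(classname, d):
    return Point(d["x"], d["y"])

SerializerBase.register_class_to_dict(Point, point_to_dict)
SerializerBase.register_dict_to_class("example.Point", dict_to_point)

ser = serializers["serpent"]
restored = ser.loads(ser.dumps(Point(1, 2)))
print(restored.x, restored.y)   # -> 1 2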
0.808405
# Exercise number 3 from the list
n1 = int(input('Enter a value: '))
n2 = int(input('Enter another value: '))
soma = n1 + n2
print('The sum of {} and {} is {}'.format(n1, n2, soma))
Mundo 1/Ex003 - soma.py
176
Exercise number 3 from the list
27
pt
0.974302
""" Meta thinking: python objects & introspection usefull documentation: http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Metaprogramming.html """ import inspect import pkgutil from importlib import import_module from types import ModuleType from typing import Any, Callable, Dict, List, Optional, Type from restapi.config import BACKEND_PACKAGE, CUSTOM_PACKAGE from restapi.utilities import print_and_exit from restapi.utilities.logs import log class Meta: """Utilities with meta in mind""" @staticmethod def get_classes_from_module(module: ModuleType) -> Dict[str, Type[Any]]: """ Find classes inside a python module file. """ try: return { name: cls for name, cls in module.__dict__.items() if isinstance(cls, type) } except AttributeError: log.warning("Could not find any class in module {}", module) return {} @staticmethod def get_new_classes_from_module(module: ModuleType) -> Dict[str, Type[Any]]: """ Skip classes not originated inside the module. """ classes = {} for name, value in Meta.get_classes_from_module(module).items(): if module.__name__ in value.__module__: classes[name] = value return classes # Should return `from types import ModuleType` -> Optional[ModuleType] @staticmethod def get_module_from_string( modulestring: str, exit_on_fail: bool = False ) -> Optional[ModuleType]: """ Getting a module import when your module is stored as a string in a variable """ try: return import_module(modulestring) except ModuleNotFoundError as e: if exit_on_fail: log.error(e) raise e return None except Exception as e: # pragma: no cover if exit_on_fail: log.error(e) raise e log.error("Module {} not found.\nError: {}", modulestring, e) return None @staticmethod def get_self_reference_from_args(*args: Any) -> Optional[Any]: """ Useful in decorators: being able to call the internal method by getting the 'self' reference from the decorated method (when it's there) """ if len(args) > 0: candidate_as_self = args[0] cls_attribute = getattr(candidate_as_self, "__class__", None) if cls_attribute is not None and inspect.isclass(cls_attribute): return args[0] return None @staticmethod def import_models( name: str, package: str, mandatory: bool = False ) -> Dict[str, Type[Any]]: if package == BACKEND_PACKAGE: module_name = f"{package}.connectors.{name}.models" else: module_name = f"{package}.models.{name}" try: module = Meta.get_module_from_string(module_name, exit_on_fail=True) except Exception as e: module = None if mandatory: log.critical(e) if not module: if mandatory: print_and_exit("Cannot load {} models from {}", name, module_name) return {} return Meta.get_new_classes_from_module(module) @staticmethod def get_celery_tasks(package_name: str) -> List[Callable[..., Any]]: """ Extract all celery tasks from a module. Celery tasks are functions decorated by @CeleryExt.celery_app.task(...) This decorator transform the function into a class child of celery.local.PromiseProxy """ tasks: List[Callable[..., Any]] = [] # package = tasks folder package = Meta.get_module_from_string(package_name) if package is None: return tasks # get all modules in package (i.e. py files) path = package.__path__ for _, module_name, ispkg in pkgutil.iter_modules(path): # skip modules (i.e. subfolders) if ispkg: # pragma: no cover continue module_path = f"{package_name}.{module_name}" log.debug("Loading module '{}'", module_path) # convert file name in submodule, i.e. 
# tasks.filename submodule = Meta.get_module_from_string( module_path, exit_on_fail=True, ) # get all functions in py file functions = inspect.getmembers(submodule) for func in functions: obj_type = type(func[1]) if obj_type.__module__ != "celery.local": continue # This was a dict name => func # tasks[func[0]] = func[1] # Now it is a list tasks.append(func[1]) return tasks @staticmethod def get_class(module_relpath: str, class_name: str) -> Optional[Any]: abspath = f"{CUSTOM_PACKAGE}.{module_relpath}" module = Meta.get_module_from_string(abspath) if module is None: log.debug("{} path does not exist", abspath) return None if not hasattr(module, class_name): return None return getattr(module, class_name)
restapi/utilities/meta.py
5,411
Utilities with meta in mind Extract all celery tasks from a module. Celery tasks are functions decorated by @CeleryExt.celery_app.task(...) This decorator transform the function into a class child of celery.local.PromiseProxy Find classes inside a python module file. Getting a module import when your module is stored as a string in a variable Skip classes not originated inside the module. Useful in decorators: being able to call the internal method by getting the 'self' reference from the decorated method (when it's there) Meta thinking: python objects & introspection usefull documentation: http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Metaprogramming.html Should return `from types import ModuleType` -> Optional[ModuleType] pragma: no cover package = tasks folder get all modules in package (i.e. py files) skip modules (i.e. subfolders) pragma: no cover convert file name in submodule, i.e. tasks.filename get all functions in py file This was a dict name => func tasks[func[0]] = func[1] Now it is a list
1,036
en
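A short usage sketch for the Meta helpers above, assuming the restapi package is importable; the dotted module name passed in is just a standard-library example.

# Import a module by its dotted name and list the classes it defines itself.
from restapi.utilities.meta import Meta

mod = Meta.get_module_from_string("json.decoder")
if mod is not None:
    own_classes = Meta.get_new_classes_from_module(mod)
    print(sorted(own_classes))   # e.g. ['JSONDecodeError', 'JSONDecoder']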
0.721413
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid


class HUnitR06_IsolatedLHS(HimesisPreConditionPatternLHS):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HUnitR06_IsolatedLHS
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(HUnitR06_IsolatedLHS, self).__init__(name='HUnitR06_IsolatedLHS', num_nodes=0, edges=[])

        # Add the edges
        self.add_edges([])

        # Set the graph attributes
        self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
        self["MT_constraint__"] = """return True"""
        self["name"] = """"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, 'HUnitR06_IsolatedLHS')
        self["equations"] = []

        # Set the node attributes

        # match class PhysicalNode(6.0.m.0PhysicalNode) node
        self.add_node()
        self.vs[0]["MT_pre__attr1"] = """return True"""
        self.vs[0]["MT_label__"] = """1"""
        self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
        self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, '6.0.m.0PhysicalNode')

        # match class Partition(6.0.m.1Partition) node
        self.add_node()
        self.vs[1]["MT_pre__attr1"] = """return True"""
        self.vs[1]["MT_label__"] = """2"""
        self.vs[1]["mm__"] = """MT_pre__Partition"""
        self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS, '6.0.m.1Partition')

    # define evaluation methods for each apply class.
    def eval_attr11(self, attr_value, this):
        return True

    def eval_attr12(self, attr_value, this):
        return True

    def constraint(self, PreNode, graph):
        return True
GM2AUTOSAR_MM/Properties/unit_contracts/HUnitR06_IsolatedLHS.py
1,526
Creates the himesis graph representing the AToM3 model HUnitR06_IsolatedLHS Flag this instance as compiled now Add the edges Set the graph attributes Set the node attributes match class PhysicalNode(6.0.m.0PhysicalNode) node match class Partition(6.0.m.1Partition) node define evaluation methods for each apply class.
319
en
0.666788
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np

from openvino.tools.mo.ops.eye import MXEye
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs


class EyeExtractor(FrontExtractorOp):
    op = '_eye'
    enabled = True

    @classmethod
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        num_rows = attrs.int("N")
        num_columns = attrs.int("M", num_rows)
        if num_columns is None or num_columns == 0:
            num_columns = num_rows
        diagonal_index = attrs.int("k", 0)
        out_type = attrs.dtype("dtype", np.float32)
        new_attrs = {'num_rows': num_rows, 'num_columns': num_columns,
                     'diagonal_index': diagonal_index, 'output_type': out_type}
        MXEye.update_node_stat(node, new_attrs)
        return cls.enabled
tools/mo/openvino/tools/mo/front/mxnet/eye_ext.py
936
Copyright (C) 2018-2022 Intel Corporation SPDX-License-Identifier: Apache-2.0
77
en
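The attributes the extractor pulls out (N rows, M columns, diagonal index k, dtype) mirror numpy's eye parameters, which is a quick way to see what the op produces; the values below are illustrative.

# What an Eye op with num_rows=3, num_columns=4, diagonal_index=1 evaluates to.
import numpy as np

print(np.eye(3, 4, k=1, dtype=np.float32))
# [[0. 1. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 0. 0. 1.]]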
0.237368
__copyright__ = """ Copyright (C) 2009-2017 Andreas Kloeckner Copyright (C) 2014-2017 Aaron Meurer """ __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import urwid import bdb import gc import os import sys from itertools import count from functools import partial from types import TracebackType from pudb.lowlevel import decode_lines, ui_log from pudb.settings import load_config, save_config CONFIG = load_config() save_config(CONFIG) HELP_HEADER = r""" Key Assignments: Use Arrow Down/Up or Page Down/Up to scroll. """ HELP_MAIN = r""" Keys: Ctrl-p - edit preferences n - step over ("next") s - step into c - continue r/f - finish current function t - run to cursor e - show traceback [post-mortem or in exception state] b - set/clear breakpoint Ctrl-e - open file at current line to edit with $EDITOR H - move to current line (bottom of stack) u - move up one stack frame d - move down one stack frame o - show console/output screen m - open module j/k - down/up l/h - right/left Ctrl-f/b - page down/up Ctrl-d/u - page down/up G/g - end/home L - show (file/line) location / go to line / - search ,/. - search next/previous V - focus variables S - focus stack B - focus breakpoint list C - focus code F1/? - show this help screen q - quit Ctrl-r - reload breakpoints from saved-breakpoints file Ctrl-c - when in continue mode, break back to PuDB Ctrl-l - redraw screen Shell-related: ! 
- open the external shell (configured in the settings) Ctrl-x - toggle the internal shell focus +/- - grow/shrink inline shell (active in command line history) _/= - minimize/maximize inline shell (active in command line history) Ctrl-v - insert newline Ctrl-n/p - browse command line history Tab - yes, there is (simple) tab completion """ HELP_SIDE = r""" Sidebar-related (active in sidebar): +/- - grow/shrink sidebar _/= - minimize/maximize sidebar [/] - grow/shrink relative size of active sidebar box Keys in variables list: \/enter/space - expand/collapse h - collapse l - expand d/t/r/s/i/c - show default/type/repr/str/id/custom for this variable H - toggle highlighting @ - toggle repetition at top * - cycle attribute visibility: public/_private/__dunder__ m - toggle method visibility w - toggle line wrapping n/insert - add new watch expression e - edit options (also to delete) Keys in stack list: enter - jump to frame Ctrl-e - open file at line to edit with $EDITOR Keys in breakpoints list: enter - jump to breakpoint b - toggle breakpoint d - delete breakpoint e - edit breakpoint Other keys: j/k - down/up l/h - right/left Ctrl-f/b - page down/up Ctrl-d/u - page down/up G/g - end/home V - focus variables S - focus stack B - focus breakpoint list C - focus code F1/? - show this help screen q - quit Ctrl-l - redraw screen """ HELP_LICENSE = r""" License: -------- PuDB is licensed to you under the MIT/X Consortium license: Copyright (c) 2009-16 Andreas Kloeckner and contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # {{{ debugger interface class Debugger(bdb.Bdb): def __init__(self, stdin=None, stdout=None, term_size=None, steal_output=False, **kwargs): # Pass remaining kwargs to python debugger framework bdb.Bdb.__init__(self, **kwargs) self.ui = DebuggerUI(self, stdin=stdin, stdout=stdout, term_size=term_size) self.steal_output = steal_output self.setup_state() if steal_output: raise NotImplementedError("output stealing") from io import StringIO self.stolen_output = sys.stderr = sys.stdout = StringIO() sys.stdin = StringIO("") # avoid spurious hangs from pudb.settings import load_breakpoints for bpoint_descr in load_breakpoints(): self.set_break(*bpoint_descr) # These (dispatch_line and set_continue) are copied from bdb with the # patch from https://bugs.python.org/issue16482 applied. See # https://github.com/inducer/pudb/pull/90. 
def dispatch_line(self, frame): if self.stop_here(frame) or self.break_here(frame): self.user_line(frame) if self.quitting: raise bdb.BdbQuit # Do not re-install the local trace when we are finished debugging, # see issues 16482 and 7238. if not sys.gettrace(): return None return self.trace_dispatch def set_continue(self): # Don't stop except at breakpoints or when finished self._set_stopinfo(self.botframe, None, -1) if not self.breaks: # no breakpoints; run without debugger overhead sys.settrace(None) frame = sys._getframe().f_back while frame: del frame.f_trace if frame is self.botframe: break frame = frame.f_back def set_trace(self, frame=None, as_breakpoint=None, paused=True): """Start debugging from `frame`. If frame is not specified, debugging starts from caller's frame. Unlike Bdb.set_trace(), this does not call self.reset(), which causes the debugger to enter bdb source code. This also implements treating set_trace() calls as breakpoints in the PuDB UI. If as_breakpoint=True (the default), this call will be treated like a breakpoint in the UI (you can press 'b' on it to disable breaking here). If paused=False, the debugger will not break here. """ if as_breakpoint is None: if not paused: as_breakpoint = False else: as_breakpoint = True if frame is None: frame = thisframe = sys._getframe().f_back else: thisframe = frame # See pudb issue #52. If this works well enough we should upstream to # stdlib bdb.py. #self.reset() while frame: frame.f_trace = self.trace_dispatch self.botframe = frame frame = frame.f_back thisframe_info = ( self.canonic(thisframe.f_code.co_filename), thisframe.f_lineno) if thisframe_info not in self.set_traces or self.set_traces[thisframe_info]: if as_breakpoint: self.set_traces[thisframe_info] = True if self.ui.source_code_provider is not None: self.ui.set_source_code_provider( self.ui.source_code_provider, force_update=True) if paused: self.set_step() else: self.set_continue() sys.settrace(self.trace_dispatch) else: return def save_breakpoints(self): from pudb.settings import save_breakpoints save_breakpoints([ bp for fn, bp_lst in self.get_all_breaks().items() for lineno in bp_lst for bp in self.get_breaks(fn, lineno) if not bp.temporary]) def enter_post_mortem(self, exc_tuple): self.post_mortem = True def setup_state(self): self.bottom_frame = None self.mainpyfile = "" self._wait_for_mainpyfile = False self.current_bp = None self.post_mortem = False # Mapping of (filename, lineno) to bool. If True, will stop on the # set_trace() call at that location. 
self.set_traces = {} def restart(self): from linecache import checkcache checkcache() self.ui.set_source_code_provider(NullSourceCodeProvider()) self.setup_state() def do_clear(self, arg): self.clear_bpbynumber(int(arg)) def set_frame_index(self, index): self.curindex = index if index < 0 or index >= len(self.stack): return self.curframe, lineno = self.stack[index] filename = self.curframe.f_code.co_filename import linecache if not linecache.getlines(filename): code = self.curframe.f_globals.get("_MODULE_SOURCE_CODE") if code is not None: self.ui.set_current_line(lineno, DirectSourceCodeProvider( self.curframe.f_code.co_name, code)) else: self.ui.set_current_line(lineno, NullSourceCodeProvider()) else: self.ui.set_current_line(lineno, FileSourceCodeProvider(self, filename)) self.ui.update_var_view() self.ui.update_stack() self.ui.stack_list._w.set_focus(self.ui.translate_ui_stack_index(index)) @staticmethod def open_file_to_edit(filename, line_number): if not os.path.isfile(filename): raise FileNotFoundError(f"'{filename}' not found or is not a file.") if not line_number: line_number = 1 editor = os.environ.get("EDITOR", "nano") import subprocess subprocess.call([editor, f"+{line_number}", filename], shell=False) return filename def move_up_frame(self): if self.curindex > 0: self.set_frame_index(self.curindex-1) def move_down_frame(self): if self.curindex < len(self.stack)-1: self.set_frame_index(self.curindex+1) def get_shortened_stack(self, frame, tb): stack, index = self.get_stack(frame, tb) for i, (s_frame, lineno) in enumerate(stack): if s_frame is self.bottom_frame and index >= i: stack = stack[i:] index -= i return stack, index def interaction(self, frame, exc_tuple=None, show_exc_dialog=True): if exc_tuple is None: tb = None elif isinstance(exc_tuple, TracebackType): # For API compatibility with other debuggers, the second variable # can be a traceback object. In that case, we need to retrieve the # corresponding exception tuple. 
            tb = exc_tuple
            exc, = (exc
                    for exc in gc.get_referrers(tb)
                    if getattr(exc, "__traceback__", None) is tb)
            exc_tuple = type(exc), exc, tb
        else:
            tb = exc_tuple[2]

        if frame is None and tb is not None:
            frame = tb.tb_frame

        found_bottom_frame = False
        walk_frame = frame
        while True:
            if walk_frame is self.bottom_frame:
                found_bottom_frame = True
                break
            if walk_frame is None:
                break
            walk_frame = walk_frame.f_back

        if not found_bottom_frame and not self.post_mortem:
            return

        self.stack, index = self.get_shortened_stack(frame, tb)
        if self.post_mortem:
            index = len(self.stack)-1

        self.set_frame_index(index)

        self.ui.call_with_ui(self.ui.interaction, exc_tuple,
                show_exc_dialog=show_exc_dialog)

    def get_stack_situation_id(self):
        return str(id(self.stack[self.curindex][0].f_code))

    def user_call(self, frame, argument_list):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function."""
        if self._wait_for_mainpyfile:
            return
        if self.stop_here(frame):
            self.interaction(frame)

    def user_line(self, frame):
        """This function is called when we stop or break at this line."""
        if "__exc_tuple__" in frame.f_locals:
            del frame.f_locals["__exc_tuple__"]

        if self._wait_for_mainpyfile:
            if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                    or frame.f_lineno <= 0):
                return
            self._wait_for_mainpyfile = False
            self.bottom_frame = frame

        if self.get_break(self.canonic(frame.f_code.co_filename), frame.f_lineno):
            self.current_bp = (
                    self.canonic(frame.f_code.co_filename), frame.f_lineno)
        else:
            self.current_bp = None

        try:
            self.ui.update_breakpoints()
            self.interaction(frame)
        except Exception:
            self.ui.show_internal_exc_dlg(sys.exc_info())

    def user_return(self, frame, return_value):
        """This function is called when a return trap is set here."""
        if frame.f_code.co_name != "<module>":
            frame.f_locals["__return__"] = return_value

        if self._wait_for_mainpyfile:
            if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                    or frame.f_lineno <= 0):
                return
            self._wait_for_mainpyfile = False
            self.bottom_frame = frame

        if "__exc_tuple__" not in frame.f_locals:
            self.interaction(frame)

    def user_exception(self, frame, exc_tuple):
        """This function is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        frame.f_locals["__exc_tuple__"] = exc_tuple

        if not self._wait_for_mainpyfile:
            self.interaction(frame, exc_tuple)

    def _runscript(self, filename):
        # Provide separation from current __main__, which is likely
        # pudb.__main__ run. Preserving its namespace is not important, and
        # having the script share it ensures that, e.g., pickle can find
        # types defined there:
        # https://github.com/inducer/pudb/issues/331
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({
            "__name__": "__main__",
            "__file__": filename,
            "__builtins__": __builtins__,
            })

        # When bdb sets tracing, a number of call and line events happens
        # BEFORE debugger even reaches user's code (and the exact sequence of
        # events depends on python version). So we take special measures to
        # avoid stopping before we reach the main script (see user_line and
        # user_call for details).
        self._wait_for_mainpyfile = 1
        self.mainpyfile = self.canonic(filename)
        statement = 'exec(compile(open("{}").read(), "{}", "exec"))'.format(
                filename, filename)

        # Set up an interrupt handler
        from pudb import set_interrupt_handler
        set_interrupt_handler()

        # Implicitly runs in the namespace of __main__.
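        # For a hypothetical filename such as "example.py", the statement built
        # above reads (illustrative only):
        #     exec(compile(open("example.py").read(), "example.py", "exec"))
        # so the compiled code reports "example.py" as its co_filename, which
        # user_line() compares (via canonic()) against self.mainpyfile to
        # decide when the main script has been reached.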
        self.run(statement)

    def _runmodule(self, module_name):
        # This is basically stolen from the pdb._runmodule from CPython 3.8
        # https://github.com/python/cpython/blob/a1d3be4623c8ec7069bd34ccdce336be9cdeb644/Lib/pdb.py#L1530
        import runpy
        mod_name, mod_spec, code = runpy._get_module_details(module_name)

        self.mainpyfile = self.canonic(code.co_filename)
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({
            "__name__": "__main__",
            "__file__": self.mainpyfile,
            "__spec__": mod_spec,
            "__builtins__": __builtins__,
            "__package__": mod_spec.parent,
            "__loader__": mod_spec.loader,
            })

        self._wait_for_mainpyfile = True
        self.run(code)

# }}}


# UI stuff --------------------------------------------------------------------

from pudb.ui_tools import make_hotkey_markup, labelled_value, \
        SelectableText, SignalWrap, StackFrame, BreakpointFrame

from pudb.var_view import FrameVarInfoKeeper


# {{{ display setup

try:
    import curses
except ImportError:
    curses = None


from urwid.raw_display import Screen as RawScreen

try:
    from urwid.curses_display import Screen as CursesScreen
except ImportError:
    CursesScreen = None


class ThreadsafeScreenMixin:
    """A Screen subclass that doesn't crash when running from a non-main thread."""

    def signal_init(self):
        """Initialize signal handler, ignoring errors silently."""
        try:
            super().signal_init()
        except ValueError:
            pass

    def signal_restore(self):
        """Restore default signal handler, ignoring errors silently."""
        try:
            super().signal_restore()
        except ValueError:
            pass


class ThreadsafeRawScreen(ThreadsafeScreenMixin, RawScreen):
    pass


class ThreadsafeFixedSizeRawScreen(ThreadsafeScreenMixin, RawScreen):
    def __init__(self, **kwargs):
        self._term_size = kwargs.pop("term_size", None)
        super().__init__(**kwargs)

    def get_cols_rows(self):
        if self._term_size is not None:
            return self._term_size
        else:
            return 80, 24


if curses is not None:
    class ThreadsafeCursesScreen(ThreadsafeScreenMixin, RawScreen):
        pass

# }}}


# {{{ source code providers

class SourceCodeProvider:
    def __ne__(self, other):
        return not (self == other)


class NullSourceCodeProvider(SourceCodeProvider):
    def __eq__(self, other):
        return type(self) == type(other)

    def identifier(self):
        return "<no source code>"

    def get_source_identifier(self):
        return None

    def clear_cache(self):
        pass

    def get_lines(self, debugger_ui):
        from pudb.source_view import SourceLine
        return [
                SourceLine(debugger_ui, "<no source code available>"),
                SourceLine(debugger_ui, ""),
                SourceLine(debugger_ui, "If this is generated code and you would "
                    "like the source code to show up here,"),
                SourceLine(debugger_ui, "add it to linecache.cache, like"),
                SourceLine(debugger_ui, ""),
                SourceLine(debugger_ui, "    import linecache"),
                SourceLine(debugger_ui, "    linecache.cache[filename] = "
                    "(size, mtime, lines, fullname)"),
                SourceLine(debugger_ui, ""),
                SourceLine(debugger_ui, "You can also set the attribute "
                    "_MODULE_SOURCE_CODE in the module in which this function"),
                SourceLine(debugger_ui, "was compiled to a string containing "
                    "the code."),
                ]


class FileSourceCodeProvider(SourceCodeProvider):
    def __init__(self, debugger, file_name):
        self.file_name = debugger.canonic(file_name)

    def __eq__(self, other):
        return type(self) == type(other) and self.file_name == other.file_name

    def identifier(self):
        return self.file_name

    def get_source_identifier(self):
        return self.file_name

    def clear_cache(self):
        from linecache import clearcache
        clearcache()

    def get_lines(self, debugger_ui):
        from pudb.source_view import SourceLine, format_source

        if self.file_name == "<string>":
            return
[SourceLine(debugger_ui, self.file_name)] breakpoints = debugger_ui.debugger.get_file_breaks(self.file_name)[:] breakpoints = [lineno for lineno in breakpoints if any(bp.enabled for bp in debugger_ui.debugger.get_breaks(self.file_name, lineno))] breakpoints += [i for f, i in debugger_ui.debugger.set_traces if f == self.file_name and debugger_ui.debugger.set_traces[f, i]] try: from linecache import getlines lines = getlines(self.file_name) return format_source( debugger_ui, list(decode_lines(lines)), set(breakpoints)) except Exception: from pudb.lowlevel import format_exception debugger_ui.message("Could not load source file '{}':\n\n{}".format( self.file_name, "".join(format_exception(sys.exc_info()))), title="Source Code Load Error") return [SourceLine(debugger_ui, "Error while loading '%s'." % self.file_name)] class DirectSourceCodeProvider(SourceCodeProvider): def __init__(self, func_name, code): self.function_name = func_name self.code = code def __eq__(self, other): return ( type(self) == type(other) and self.function_name == other.function_name and self.code is other.code) def identifier(self): return "<source code of function %s>" % self.function_name def get_source_identifier(self): return None def clear_cache(self): pass def get_lines(self, debugger_ui): from pudb.source_view import format_source lines = self.code.splitlines(True) return format_source(debugger_ui, list(decode_lines(lines)), set()) # }}} class DebuggerUI(FrameVarInfoKeeper): # {{{ constructor def __init__(self, dbg, stdin, stdout, term_size): FrameVarInfoKeeper.__init__(self) self.debugger = dbg from urwid import AttrMap from pudb.ui_tools import SearchController self.search_controller = SearchController(self) self.last_module_filter = "" # {{{ build ui # {{{ key bindings def move_up(w, size, key): w.keypress(size, "up") def move_down(w, size, key): w.keypress(size, "down") def move_left(w, size, key): w.keypress(size, "left") def move_right(w, size, key): w.keypress(size, "right") def page_up(w, size, key): w.keypress(size, "page up") def page_down(w, size, key): w.keypress(size, "page down") def move_home(w, size, key): w.keypress(size, "home") def move_end(w, size, key): w.keypress(size, "end") def add_vi_nav_keys(widget): widget.listen("k", move_up) widget.listen("j", move_down) widget.listen("h", move_left) widget.listen("l", move_right) widget.listen("ctrl b", page_up) widget.listen("ctrl f", page_down) widget.listen("ctrl u", page_up) widget.listen("ctrl d", page_down) widget.listen("g", move_home) widget.listen("G", move_end) def add_help_keys(widget, helpfunc): widget.listen("f1", helpfunc) widget.listen("?", helpfunc) # }}} # {{{ left/source column self.source = urwid.SimpleListWalker([]) self.source_list = urwid.ListBox(self.source) self.source_sigwrap = SignalWrap(self.source_list) self.source_attr = urwid.AttrMap(self.source_sigwrap, "source") self.source_hscroll_start = 0 self.cmdline_history = [] self.cmdline_history_position = -1 self.cmdline_contents = urwid.SimpleFocusListWalker([]) self.cmdline_list = urwid.ListBox(self.cmdline_contents) self.cmdline_edit = urwid.Edit([ ("command line prompt", ">>> ") ]) cmdline_edit_attr = urwid.AttrMap(self.cmdline_edit, "command line edit") self.cmdline_edit_sigwrap = SignalWrap( cmdline_edit_attr, is_preemptive=True) def clear_cmdline_history(btn): del self.cmdline_contents[:] self.cmdline_edit_bar = urwid.Columns([ self.cmdline_edit_sigwrap, ("fixed", 10, AttrMap( urwid.Button("Clear", clear_cmdline_history), "command line clear button", "command line 
focused button")) ]) self.cmdline_pile = urwid.Pile([ ("flow", urwid.Text("Command line: [Ctrl-X]")), ("weight", 1, urwid.AttrMap(self.cmdline_list, "command line output")), ("flow", self.cmdline_edit_bar), ]) self.cmdline_sigwrap = SignalWrap( urwid.AttrMap(self.cmdline_pile, None, "focused sidebar") ) self.cmdline_on = not CONFIG["hide_cmdline_win"] self.cmdline_weight = 1 self.lhs_col = urwid.Pile([ ("weight", 5, self.source_attr), ("weight", self.cmdline_weight if self.cmdline_on else 0, self.cmdline_sigwrap), ]) # }}} # {{{ right column self.locals = urwid.SimpleListWalker([]) self.var_list = SignalWrap( urwid.ListBox(self.locals)) self.stack_walker = urwid.SimpleListWalker([]) self.stack_list = SignalWrap( urwid.ListBox(self.stack_walker)) self.bp_walker = urwid.SimpleListWalker([]) self.bp_list = SignalWrap( urwid.ListBox(self.bp_walker)) self.rhs_col = urwid.Pile([ ("weight", float(CONFIG["variables_weight"]), AttrMap(urwid.Pile([ ("flow", urwid.Text(make_hotkey_markup("_Variables:"))), AttrMap(self.var_list, "variables"), ]), None, "focused sidebar"),), ("weight", float(CONFIG["stack_weight"]), AttrMap(urwid.Pile([ ("flow", urwid.Text(make_hotkey_markup("_Stack:"))), AttrMap(self.stack_list, "stack"), ]), None, "focused sidebar"),), ("weight", float(CONFIG["breakpoints_weight"]), AttrMap(urwid.Pile([ ("flow", urwid.Text(make_hotkey_markup("_Breakpoints:"))), AttrMap(self.bp_list, "breakpoint"), ]), None, "focused sidebar"),), ]) self.rhs_col_sigwrap = SignalWrap(self.rhs_col) def helpside(w, size, key): help(HELP_HEADER + HELP_SIDE + HELP_MAIN + HELP_LICENSE) add_vi_nav_keys(self.rhs_col_sigwrap) add_help_keys(self.rhs_col_sigwrap, helpside) # }}} self.columns = urwid.Columns( [ ("weight", 1, self.lhs_col), ("weight", float(CONFIG["sidebar_width"]), self.rhs_col_sigwrap), ], dividechars=1) self.caption = urwid.Text("") header = urwid.AttrMap(self.caption, "header") self.top = SignalWrap(urwid.Frame( urwid.AttrMap(self.columns, "background"), header)) # }}} def change_rhs_box(name, index, direction, w, size, key): from pudb.settings import save_config weight = self.rhs_col.item_types[index][1] if direction < 0: if weight > 1/5: weight /= 1.25 else: if weight < 5: weight *= 1.25 CONFIG[name+"_weight"] = weight save_config(CONFIG) self.rhs_col.item_types[index] = "weight", weight self.rhs_col._invalidate() # {{{ variables listeners def get_inspect_info(id_path, read_only=False): return (self.get_frame_var_info(read_only) .get_inspect_info(id_path, read_only)) def collapse_current(var, pos, iinfo): if iinfo.show_detail: # collapse current variable iinfo.show_detail = False else: # collapse parent/container variable if var.parent is not None: p_iinfo = get_inspect_info(var.parent.id_path) p_iinfo.show_detail = False return self.locals.index(var.parent) return None def change_var_state(w, size, key): var, pos = self.var_list._w.get_focus() if var is None: return iinfo = get_inspect_info(var.id_path) focus_index = None if key == "enter" or key == "\\" or key == " ": iinfo.show_detail = not iinfo.show_detail elif key == "h": focus_index = collapse_current(var, pos, iinfo) elif key == "l": iinfo.show_detail = True elif key == "d": iinfo.display_type = "default" elif key == "t": iinfo.display_type = "type" elif key == "r": iinfo.display_type = "repr" elif key == "s": iinfo.display_type = "str" elif key == "i": iinfo.display_type = "id" elif key == "c": iinfo.display_type = CONFIG["custom_stringifier"] elif key == "H": iinfo.highlighted = not iinfo.highlighted elif key == "@": 
iinfo.repeated_at_top = not iinfo.repeated_at_top elif key == "*": levels = ["public", "private", "all", "public"] iinfo.access_level = levels[levels.index(iinfo.access_level)+1] elif key == "w": iinfo.wrap = not iinfo.wrap elif key == "m": iinfo.show_methods = not iinfo.show_methods self.update_var_view(focus_index=focus_index) def edit_inspector_detail(w, size, key): var, pos = self.var_list._w.get_focus() if var is None: return fvi = self.get_frame_var_info(read_only=False) iinfo = fvi.get_inspect_info(var.id_path, read_only=False) buttons = [ ("OK", True), ("Cancel", False), ] if var.watch_expr is not None: watch_edit = urwid.Edit([ ("label", "Watch expression: ") ], var.watch_expr.expression) id_segment = [urwid.AttrMap(watch_edit, "value"), urwid.Text("")] buttons.extend([None, ("Delete", "del")]) title = "Watch Expression Options" else: id_segment = [ labelled_value("Identifier Path: ", var.id_path), urwid.Text(""), ] title = "Variable Inspection Options" rb_grp_show = [] rb_show_default = urwid.RadioButton(rb_grp_show, "Default", iinfo.display_type == "default") rb_show_type = urwid.RadioButton(rb_grp_show, "Show type()", iinfo.display_type == "type") rb_show_repr = urwid.RadioButton(rb_grp_show, "Show repr()", iinfo.display_type == "repr") rb_show_str = urwid.RadioButton(rb_grp_show, "Show str()", iinfo.display_type == "str") rb_show_id = urwid.RadioButton(rb_grp_show, "Show id()", iinfo.display_type == "id") rb_show_custom = urwid.RadioButton( rb_grp_show, "Show custom (set in prefs)", iinfo.display_type == CONFIG["custom_stringifier"]) rb_grp_access = [] rb_access_public = urwid.RadioButton(rb_grp_access, "Public members", iinfo.access_level == "public") rb_access_private = urwid.RadioButton( rb_grp_access, "Public and private members", iinfo.access_level == "private") rb_access_all = urwid.RadioButton( rb_grp_access, "All members (including __dunder__)", iinfo.access_level == "all") wrap_checkbox = urwid.CheckBox("Line Wrap", iinfo.wrap) expanded_checkbox = urwid.CheckBox("Expanded", iinfo.show_detail) highlighted_checkbox = urwid.CheckBox("Highlighted", iinfo.highlighted) repeated_at_top_checkbox = urwid.CheckBox( "Repeated at top", iinfo.repeated_at_top) show_methods_checkbox = urwid.CheckBox( "Show methods", iinfo.show_methods) lb = urwid.ListBox(urwid.SimpleListWalker( id_segment + rb_grp_show + [urwid.Text("")] + rb_grp_access + [urwid.Text("")] + [ wrap_checkbox, expanded_checkbox, highlighted_checkbox, repeated_at_top_checkbox, show_methods_checkbox, ])) result = self.dialog(lb, buttons, title=title) if result is True: iinfo.show_detail = expanded_checkbox.get_state() iinfo.wrap = wrap_checkbox.get_state() iinfo.highlighted = highlighted_checkbox.get_state() iinfo.repeated_at_top = repeated_at_top_checkbox.get_state() iinfo.show_methods = show_methods_checkbox.get_state() if rb_show_default.get_state(): iinfo.display_type = "default" elif rb_show_type.get_state(): iinfo.display_type = "type" elif rb_show_repr.get_state(): iinfo.display_type = "repr" elif rb_show_str.get_state(): iinfo.display_type = "str" elif rb_show_id.get_state(): iinfo.display_type = "id" elif rb_show_custom.get_state(): iinfo.display_type = CONFIG["custom_stringifier"] if rb_access_public.get_state(): iinfo.access_level = "public" elif rb_access_private.get_state(): iinfo.access_level = "private" elif rb_access_all.get_state(): iinfo.access_level = "all" if var.watch_expr is not None: var.watch_expr.expression = watch_edit.get_edit_text() elif result == "del": for i, watch_expr in 
enumerate(fvi.watches): if watch_expr is var.watch_expr: del fvi.watches[i] self.update_var_view() def insert_watch(w, size, key): watch_edit = urwid.Edit([ ("label", "Watch expression: ") ]) if self.dialog( urwid.ListBox(urwid.SimpleListWalker([ urwid.AttrMap(watch_edit, "value") ])), [ ("OK", True), ("Cancel", False), ], title="Add Watch Expression"): from pudb.var_view import WatchExpression we = WatchExpression(watch_edit.get_edit_text()) fvi = self.get_frame_var_info(read_only=False) fvi.watches.append(we) self.update_var_view() self.var_list.listen("\\", change_var_state) self.var_list.listen(" ", change_var_state) self.var_list.listen("h", change_var_state) self.var_list.listen("l", change_var_state) self.var_list.listen("d", change_var_state) self.var_list.listen("t", change_var_state) self.var_list.listen("r", change_var_state) self.var_list.listen("s", change_var_state) self.var_list.listen("i", change_var_state) self.var_list.listen("c", change_var_state) self.var_list.listen("H", change_var_state) self.var_list.listen("@", change_var_state) self.var_list.listen("*", change_var_state) self.var_list.listen("w", change_var_state) self.var_list.listen("m", change_var_state) self.var_list.listen("enter", change_var_state) self.var_list.listen("e", edit_inspector_detail) self.var_list.listen("n", insert_watch) self.var_list.listen("insert", insert_watch) self.var_list.listen("[", partial(change_rhs_box, "variables", 0, -1)) self.var_list.listen("]", partial(change_rhs_box, "variables", 0, 1)) # }}} # {{{ stack listeners def examine_frame(w, size, key): _, pos = self.stack_list._w.get_focus() self.debugger.set_frame_index(self.translate_ui_stack_index(pos)) self.stack_list.listen("enter", examine_frame) def open_file_editor(file_name, line_number): file_changed = False try: original_modification_time = os.path.getmtime(file_name) self.screen.stop() filename_edited = self.debugger.open_file_to_edit(file_name, line_number) self.screen.start() new_modification_time = os.path.getmtime(file_name) file_changed = new_modification_time - original_modification_time > 0 except Exception: from traceback import format_exception self.message("Exception happened when trying to edit the file:" "\n\n%s" % ("".join(format_exception(*sys.exc_info()))), title="File Edit Error") return if file_changed: self.message("File is changed, but the execution is continued with" " the 'old' codebase.\n" f"Changed file: {filename_edited}\n\n" "Please quit and restart to see changes", title="File is changed") def open_editor_on_stack_frame(w, size, key): _, pos = self.stack_list._w.get_focus() index = self.translate_ui_stack_index(pos) curframe, line_number = self.debugger.stack[index] file_name = curframe.f_code.co_filename open_file_editor(file_name, line_number) self.stack_list.listen("ctrl e", open_editor_on_stack_frame) def move_stack_top(w, size, key): self.debugger.set_frame_index(len(self.debugger.stack)-1) def move_stack_up(w, size, key): self.debugger.move_up_frame() def move_stack_down(w, size, key): self.debugger.move_down_frame() self.stack_list.listen("H", move_stack_top) self.stack_list.listen("u", move_stack_up) self.stack_list.listen("d", move_stack_down) self.stack_list.listen("[", partial(change_rhs_box, "stack", 1, -1)) self.stack_list.listen("]", partial(change_rhs_box, "stack", 1, 1)) # }}} # {{{ breakpoint listeners def save_breakpoints(w, size, key): self.debugger.save_breakpoints() def delete_breakpoint(w, size, key): bp_source_identifier = \ self.source_code_provider.get_source_identifier() 
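            # Illustrative values (added comment, not original code):
            # get_source_identifier() yields the canonical file name for a
            # FileSourceCodeProvider (e.g. the made-up "/home/user/app.py") and
            # None for NullSourceCodeProvider or DirectSourceCodeProvider,
            # which is why the None branch below refuses to touch breakpoints.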
if bp_source_identifier is None: self.message( "Cannot currently delete a breakpoint here--" "source code does not correspond to a file location. " "(perhaps this is generated code)") bp_list = self._get_bp_list() if bp_list: _, pos = self.bp_list._w.get_focus() bp = bp_list[pos] if bp_source_identifier == bp.file and bp.line-1 < len(self.source): self.source[bp.line-1].set_breakpoint(False) err = self.debugger.clear_break(bp.file, bp.line) if err: self.message("Error clearing breakpoint:\n" + err) else: self.update_breakpoints() def enable_disable_breakpoint(w, size, key): bp_entry, pos = self.bp_list._w.get_focus() if bp_entry is None: return bp = self._get_bp_list()[pos] bp.enabled = not bp.enabled sline = self.source[bp.line-1] sline.set_breakpoint(bp.enabled) self.update_breakpoints() def examine_breakpoint(w, size, key): bp_entry, pos = self.bp_list._w.get_focus() if bp_entry is None: return bp = self._get_bp_list()[pos] if bp.cond is None: cond = "" else: cond = str(bp.cond) enabled_checkbox = urwid.CheckBox( "Enabled", bp.enabled) cond_edit = urwid.Edit([ ("label", "Condition: ") ], cond) ign_count_edit = urwid.IntEdit([ ("label", "Ignore the next N times: ") ], bp.ignore) lb = urwid.ListBox(urwid.SimpleListWalker([ labelled_value("File: ", bp.file), labelled_value("Line: ", bp.line), labelled_value("Hits: ", bp.hits), urwid.Text(""), enabled_checkbox, urwid.AttrMap(cond_edit, "value", "value"), urwid.AttrMap(ign_count_edit, "value", "value"), ])) result = self.dialog(lb, [ ("OK", True), ("Cancel", False), None, ("Delete", "del"), ("Location", "loc"), ], title="Edit Breakpoint") if result is True: bp.enabled = enabled_checkbox.get_state() bp.ignore = int(ign_count_edit.value()) cond = cond_edit.get_edit_text() if cond: bp.cond = cond else: bp.cond = None elif result == "loc": self.show_line(bp.line, FileSourceCodeProvider(self.debugger, bp.file)) self.columns.set_focus(0) elif result == "del": bp_source_identifier = \ self.source_code_provider.get_source_identifier() if bp_source_identifier is None: self.message( "Cannot currently delete a breakpoint here--" "source code does not correspond to a file location. 
" "(perhaps this is generated code)") if bp_source_identifier == bp.file: self.source[bp.line-1].set_breakpoint(False) err = self.debugger.clear_break(bp.file, bp.line) if err: self.message("Error clearing breakpoint:\n" + err) else: self.update_breakpoints() def show_breakpoint(w, size, key): bp_entry, pos = self.bp_list._w.get_focus() if bp_entry is not None: bp = self._get_bp_list()[pos] self.show_line(bp.line, FileSourceCodeProvider(self.debugger, bp.file)) self.bp_list.listen("enter", show_breakpoint) self.bp_list.listen("d", delete_breakpoint) self.bp_list.listen("s", save_breakpoints) self.bp_list.listen("e", examine_breakpoint) self.bp_list.listen("b", enable_disable_breakpoint) self.bp_list.listen("H", move_stack_top) self.bp_list.listen("[", partial(change_rhs_box, "breakpoints", 2, -1)) self.bp_list.listen("]", partial(change_rhs_box, "breakpoints", 2, 1)) # }}} # {{{ source listeners def end(): self.debugger.save_breakpoints() self.quit_event_loop = True def next_line(w, size, key): if self.debugger.post_mortem: self.message("Post-mortem mode: Can't modify state.") else: self.debugger.set_next(self.debugger.curframe) end() def step(w, size, key): if self.debugger.post_mortem: self.message("Post-mortem mode: Can't modify state.") else: self.debugger.set_step() end() def finish(w, size, key): if self.debugger.post_mortem: self.message("Post-mortem mode: Can't modify state.") else: self.debugger.set_return(self.debugger.curframe) end() def cont(w, size, key): if self.debugger.post_mortem: self.message("Post-mortem mode: Can't modify state.") else: self.debugger.set_continue() end() def run_to_cursor(w, size, key): if self.debugger.post_mortem: self.message("Post-mortem mode: Can't modify state.") else: sline, pos = self.source.get_focus() lineno = pos+1 bp_source_identifier = \ self.source_code_provider.get_source_identifier() if bp_source_identifier is None: self.message( "Cannot currently set a breakpoint here--" "source code does not correspond to a file location. 
" "(perhaps this is generated code)") from pudb.lowlevel import get_breakpoint_invalid_reason invalid_reason = get_breakpoint_invalid_reason( bp_source_identifier, lineno) if invalid_reason is not None: self.message( "Cannot run to the line you indicated, " "for the following reason:\n\n" + invalid_reason) else: err = self.debugger.set_break( bp_source_identifier, pos+1, temporary=True) if err: self.message("Error dealing with breakpoint:\n" + err) self.debugger.set_continue() end() def go_to_line(w, size, key): _, line = self.source.get_focus() lineno_edit = urwid.IntEdit([ ("label", "Go to Line :") ], None) if self.dialog( urwid.ListBox(urwid.SimpleListWalker([ labelled_value("File :", self.source_code_provider.identifier()), labelled_value("Current Line :", line+1), urwid.AttrMap(lineno_edit, "value") ])), [ ("OK", True), ("Cancel", False), ], title="Go to Line Number"): lineno = min(max(0, int(lineno_edit.value())-1), len(self.source)-1) self.source.set_focus(lineno) def scroll_left(w, size, key): self.source_hscroll_start = max( 0, self.source_hscroll_start - 4) for sl in self.source: sl._invalidate() def scroll_right(w, size, key): self.source_hscroll_start += 4 for sl in self.source: sl._invalidate() def search(w, size, key): self.search_controller.open_search_ui() def search_next(w, size, key): self.search_controller.perform_search(dir=1, update_search_start=True) def search_previous(w, size, key): self.search_controller.perform_search(dir=-1, update_search_start=True) def toggle_breakpoint(w, size, key): bp_source_identifier = \ self.source_code_provider.get_source_identifier() if bp_source_identifier: sline, pos = self.source.get_focus() lineno = pos+1 existing_breaks = self.debugger.get_breaks( bp_source_identifier, lineno) if existing_breaks: err = None for bp in existing_breaks: if not bp.enabled: bp.enable() sline.set_breakpoint(True) # Unsure about this. Are multiple breakpoints even # possible? break else: err = self.debugger.clear_break(bp_source_identifier, lineno) sline.set_breakpoint(False) else: file_lineno = (bp_source_identifier, lineno) if file_lineno in self.debugger.set_traces: self.debugger.set_traces[file_lineno] = \ not self.debugger.set_traces[file_lineno] sline.set_breakpoint(self.debugger.set_traces[file_lineno]) return from pudb.lowlevel import get_breakpoint_invalid_reason invalid_reason = get_breakpoint_invalid_reason( bp_source_identifier, pos+1) if invalid_reason is not None: do_set = not self.dialog( urwid.ListBox( urwid.SimpleListWalker([ urwid.Text( "The breakpoint you just set may be " "invalid, for the following reason:\n\n" + invalid_reason), ])), [ ("Cancel", True), ("Set Anyway", False), ], title="Possibly Invalid Breakpoint", focus_buttons=True) else: do_set = True if do_set: err = self.debugger.set_break(bp_source_identifier, pos+1) sline.set_breakpoint(True) else: err = None if err: self.message("Error dealing with breakpoint:\n" + err) self.update_breakpoints() else: self.message( "Cannot currently set a breakpoint here--" "source code does not correspond to a file location. 
" "(perhaps this is generated code)") def pick_module(w, size, key): from os.path import splitext import sys def mod_exists(mod): if not hasattr(mod, "__file__"): return False if mod.__file__ is None: return False filename = mod.__file__ base, ext = splitext(filename) ext = ext.lower() from os.path import exists if ext == ".pyc": return exists(base+".py") else: return ext == ".py" new_mod_text = SelectableText("-- update me --") new_mod_entry = urwid.AttrMap(new_mod_text, None, "focused selectable") def build_filtered_mod_list(filt_string=""): modules = sorted(name # mod_exists may change the size of sys.modules, # causing this to crash. Copy to a list. for name, mod in list(sys.modules.items()) if mod_exists(mod)) result = [urwid.AttrMap(SelectableText(mod), None, "focused selectable") for mod in modules if filt_string in mod] new_mod_text.set_text("<<< IMPORT MODULE '%s' >>>" % filt_string) result.append(new_mod_entry) return result def show_mod(mod): filename = self.debugger.canonic(mod.__file__) base, ext = splitext(filename) if ext == ".pyc": ext = ".py" filename = base+".py" self.set_source_code_provider( FileSourceCodeProvider(self.debugger, filename)) self.source_list.set_focus(0) class FilterEdit(urwid.Edit): def keypress(self, size, key): result = urwid.Edit.keypress(self, size, key) if result is None: mod_list[:] = build_filtered_mod_list( self.get_edit_text()) return result filt_edit = FilterEdit([("label", "Filter: ")], self.last_module_filter) mod_list = urwid.SimpleListWalker( build_filtered_mod_list(filt_edit.get_edit_text())) lb = urwid.ListBox(mod_list) w = urwid.Pile([ ("flow", urwid.AttrMap(filt_edit, "value")), ("fixed", 1, urwid.SolidFill()), urwid.AttrMap(lb, "selectable")]) while True: result = self.dialog(w, [ ("OK", True), ("Cancel", False), ("Reload", "reload"), ], title="Pick Module") self.last_module_filter = filt_edit.get_edit_text() if result is True: widget, pos = lb.get_focus() if widget is new_mod_entry: new_mod_name = filt_edit.get_edit_text() try: __import__(str(new_mod_name)) except Exception: from traceback import format_exception self.message( "Could not import module '{}':\n\n{}".format( new_mod_name, "".join( format_exception(*sys.exc_info()))), title="Import Error") else: show_mod(__import__(str(new_mod_name))) break else: show_mod(sys.modules[widget.base_widget.get_text()[0]]) break elif result is False: break elif result == "reload": widget, pos = lb.get_focus() if widget is not new_mod_entry: mod_name = widget.base_widget.get_text()[0] mod = sys.modules[mod_name] import importlib importlib.reload(mod) self.message("'%s' was successfully reloaded." 
% mod_name) if self.source_code_provider is not None: self.source_code_provider.clear_cache() self.set_source_code_provider(self.source_code_provider, force_update=True) _, pos = self.stack_list._w.get_focus() self.debugger.set_frame_index( self.translate_ui_stack_index(pos)) def helpmain(w, size, key): help(HELP_HEADER + HELP_MAIN + HELP_SIDE + HELP_LICENSE) self.source_sigwrap.listen("n", next_line) self.source_sigwrap.listen("s", step) self.source_sigwrap.listen("f", finish) self.source_sigwrap.listen("r", finish) self.source_sigwrap.listen("c", cont) self.source_sigwrap.listen("t", run_to_cursor) self.source_sigwrap.listen("L", go_to_line) self.source_sigwrap.listen("/", search) self.source_sigwrap.listen(",", search_previous) self.source_sigwrap.listen(".", search_next) self.source_sigwrap.listen("b", toggle_breakpoint) self.source_sigwrap.listen("m", pick_module) self.source_sigwrap.listen("H", move_stack_top) self.source_sigwrap.listen("u", move_stack_up) self.source_sigwrap.listen("d", move_stack_down) # left/right scrolling have to be handled specially, normal vi keys # don't cut it self.source_sigwrap.listen("h", scroll_left) self.source_sigwrap.listen("l", scroll_right) add_vi_nav_keys(self.source_sigwrap) add_help_keys(self.source_sigwrap, helpmain) # }}} # {{{ command line listeners def cmdline_get_namespace(): curframe = self.debugger.curframe from pudb.shell import SetPropagatingDict return SetPropagatingDict( [curframe.f_locals, curframe.f_globals], curframe.f_locals) def cmdline_tab_complete(w, size, key): try: from jedi import Interpreter except ImportError: self.add_cmdline_content( "Tab completion requires jedi to be installed. ", "command line error") return import jedi from distutils.version import LooseVersion if LooseVersion(jedi.__version__) < LooseVersion("0.16.0"): self.add_cmdline_content( "jedi 0.16.0 is required for Tab completion", "command line error") text = self.cmdline_edit.edit_text pos = self.cmdline_edit.edit_pos chopped_text = text[:pos] suffix = text[pos:] try: completions = Interpreter( chopped_text, [cmdline_get_namespace()]).complete() except Exception as e: # Jedi sometimes produces errors. Ignore them. 
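                # Added illustrative note (not original code): jedi may raise
                # on incomplete input such as a hypothetical "foo((" typed at
                # the prompt; the message added below simply surfaces that in
                # the command-line pane instead of propagating the error.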
self.add_cmdline_content( "Could not tab complete (Jedi error: '%s')" % e, "command line error") return full_completions = [i.name_with_symbols for i in completions] chopped_completions = [i.complete for i in completions] def common_prefix(a, b): for i, (a_i, b_i) in enumerate(zip(a, b)): if a_i != b_i: return a[:i] return a[:max(len(a), len(b))] common_compl_prefix = None for completion in chopped_completions: if common_compl_prefix is None: common_compl_prefix = completion else: common_compl_prefix = common_prefix( common_compl_prefix, completion) completed_chopped_text = common_compl_prefix if completed_chopped_text is None: return if ( len(completed_chopped_text) == 0 and len(completions) > 1): self.add_cmdline_content( " ".join(full_completions), "command line output") return self.cmdline_edit.edit_text = \ chopped_text+completed_chopped_text+suffix self.cmdline_edit.edit_pos = ( len(chopped_text) + len(completed_chopped_text)) def cmdline_append_newline(w, size, key): self.cmdline_edit.insert_text("\n") def cmdline_exec(w, size, key): cmd = self.cmdline_edit.get_edit_text() if not cmd: # blank command -> refuse service return self.add_cmdline_content(">>> " + cmd, "command line input") if not self.cmdline_history or cmd != self.cmdline_history[-1]: self.cmdline_history.append(cmd) self.cmdline_history_position = -1 prev_sys_stdin = sys.stdin prev_sys_stdout = sys.stdout prev_sys_stderr = sys.stderr from io import StringIO sys.stdin = None sys.stderr = sys.stdout = StringIO() try: eval(compile(cmd, "<pudb command line>", "single"), cmdline_get_namespace()) except Exception: tp, val, tb = sys.exc_info() import traceback tblist = traceback.extract_tb(tb) del tblist[:1] tb_lines = traceback.format_list(tblist) if tb_lines: tb_lines.insert(0, "Traceback (most recent call last):\n") tb_lines[len(tb_lines):] = traceback.format_exception_only(tp, val) self.add_cmdline_content("".join(tb_lines), "command line error") else: self.cmdline_edit.set_edit_text("") finally: if sys.stdout.getvalue(): self.add_cmdline_content(sys.stdout.getvalue(), "command line output") sys.stdin = prev_sys_stdin sys.stdout = prev_sys_stdout sys.stderr = prev_sys_stderr def cmdline_history_browse(direction): if self.cmdline_history_position == -1: self.cmdline_history_position = len(self.cmdline_history) self.cmdline_history_position += direction if 0 <= self.cmdline_history_position < len(self.cmdline_history): self.cmdline_edit.edit_text = \ self.cmdline_history[self.cmdline_history_position] else: self.cmdline_history_position = -1 self.cmdline_edit.edit_text = "" self.cmdline_edit.edit_pos = len(self.cmdline_edit.edit_text) def cmdline_history_prev(w, size, key): cmdline_history_browse(-1) def cmdline_history_next(w, size, key): cmdline_history_browse(1) def cmdline_start_of_line(w, size, key): self.cmdline_edit.edit_pos = 0 def cmdline_end_of_line(w, size, key): self.cmdline_edit.edit_pos = len(self.cmdline_edit.edit_text) def cmdline_del_word(w, size, key): pos = self.cmdline_edit.edit_pos before, after = ( self.cmdline_edit.edit_text[:pos], self.cmdline_edit.edit_text[pos:]) before = before[::-1] before = before.lstrip() i = 0 while i < len(before): if not before[i].isspace(): i += 1 else: break self.cmdline_edit.edit_text = before[i:][::-1] + after self.cmdline_edit.edit_post = len(before[i:]) def cmdline_del_to_start_of_line(w, size, key): pos = self.cmdline_edit.edit_pos self.cmdline_edit.edit_text = self.cmdline_edit.edit_text[pos:] self.cmdline_edit.edit_pos = 0 def toggle_cmdline_focus(w, size, key): 
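            # Worked example for the history helpers defined above (added
            # comment; the entries are made up): with
            #     self.cmdline_history == ["x = 1", "x + 1"]
            # and cmdline_history_position == -1, Ctrl-P calls
            # cmdline_history_browse(-1): the position becomes 2 - 1 = 1 and
            # the edit line shows "x + 1"; a second Ctrl-P shows "x = 1";
            # Ctrl-N steps forward again, and moving past either end resets
            # the position to -1 and clears the edit line.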
self.columns.set_focus(self.lhs_col) if self.lhs_col.get_focus() is self.cmdline_sigwrap: if CONFIG["hide_cmdline_win"]: self.set_cmdline_state(False) self.lhs_col.set_focus(self.search_controller.search_AttrMap if self.search_controller.search_box else self.source_attr) else: if CONFIG["hide_cmdline_win"]: self.set_cmdline_state(True) self.cmdline_pile.set_focus(self.cmdline_edit_bar) self.lhs_col.set_focus(self.cmdline_sigwrap) self.cmdline_edit_sigwrap.listen("tab", cmdline_tab_complete) self.cmdline_edit_sigwrap.listen("ctrl v", cmdline_append_newline) self.cmdline_edit_sigwrap.listen("enter", cmdline_exec) self.cmdline_edit_sigwrap.listen("ctrl n", cmdline_history_next) self.cmdline_edit_sigwrap.listen("ctrl p", cmdline_history_prev) self.cmdline_edit_sigwrap.listen("esc", toggle_cmdline_focus) self.cmdline_edit_sigwrap.listen("ctrl d", toggle_cmdline_focus) self.cmdline_edit_sigwrap.listen("ctrl a", cmdline_start_of_line) self.cmdline_edit_sigwrap.listen("ctrl e", cmdline_end_of_line) self.cmdline_edit_sigwrap.listen("ctrl w", cmdline_del_word) self.cmdline_edit_sigwrap.listen("ctrl u", cmdline_del_to_start_of_line) self.top.listen("ctrl x", toggle_cmdline_focus) # {{{ command line sizing def set_cmdline_default_size(weight): self.cmdline_weight = weight self.set_cmdline_size() def max_cmdline(w, size, key): set_cmdline_default_size(5) def min_cmdline(w, size, key): set_cmdline_default_size(1/2) def grow_cmdline(w, size, key): weight = self.cmdline_weight if weight < 5: weight *= 1.25 set_cmdline_default_size(weight) def shrink_cmdline(w, size, key): weight = self.cmdline_weight if weight > 1/2: weight /= 1.25 set_cmdline_default_size(weight) self.cmdline_sigwrap.listen("=", max_cmdline) self.cmdline_sigwrap.listen("+", grow_cmdline) self.cmdline_sigwrap.listen("_", min_cmdline) self.cmdline_sigwrap.listen("-", shrink_cmdline) # }}} # }}} # {{{ sidebar sizing def max_sidebar(w, size, key): from pudb.settings import save_config weight = 5 CONFIG["sidebar_width"] = weight save_config(CONFIG) self.columns.column_types[1] = "weight", weight self.columns._invalidate() def min_sidebar(w, size, key): from pudb.settings import save_config weight = 1/5 CONFIG["sidebar_width"] = weight save_config(CONFIG) self.columns.column_types[1] = "weight", weight self.columns._invalidate() def grow_sidebar(w, size, key): from pudb.settings import save_config weight = self.columns.column_types[1][1] if weight < 5: weight *= 1.25 CONFIG["sidebar_width"] = weight save_config(CONFIG) self.columns.column_types[1] = "weight", weight self.columns._invalidate() def shrink_sidebar(w, size, key): from pudb.settings import save_config weight = self.columns.column_types[1][1] if weight > 1/5: weight /= 1.25 CONFIG["sidebar_width"] = weight save_config(CONFIG) self.columns.column_types[1] = "weight", weight self.columns._invalidate() self.rhs_col_sigwrap.listen("=", max_sidebar) self.rhs_col_sigwrap.listen("+", grow_sidebar) self.rhs_col_sigwrap.listen("_", min_sidebar) self.rhs_col_sigwrap.listen("-", shrink_sidebar) # }}} # {{{ top-level listeners def show_output(w, size, key): self.screen.stop() input("Hit Enter to return:") self.screen.start() def reload_breakpoints_and_redisplay(): reload_breakpoints() curr_line = self.current_line self.set_source_code_provider(self.source_code_provider, force_update=True) if curr_line is not None: self.current_line = self.source[int(curr_line.line_nr)-1] self.current_line.set_current(True) def reload_breakpoints(): self.debugger.clear_all_breaks() from pudb.settings import 
load_breakpoints for bpoint_descr in load_breakpoints(): dbg.set_break(*bpoint_descr) self.update_breakpoints() def show_traceback(w, size, key): if self.current_exc_tuple is not None: from traceback import format_exception result = self.dialog( urwid.ListBox(urwid.SimpleListWalker([urwid.Text( "".join(format_exception(*self.current_exc_tuple)))])), [ ("Close", "close"), ("Location", "location") ], title="Exception Viewer", focus_buttons=True, bind_enter_esc=False) if result == "location": self.debugger.set_frame_index(len(self.debugger.stack)-1) else: self.message("No exception available.") def run_external_cmdline(w, size, key): self.screen.stop() curframe = self.debugger.curframe import pudb.shell as shell if CONFIG["shell"] == "ipython" and shell.have_ipython(): runner = shell.run_ipython_shell elif CONFIG["shell"] == "ipython_kernel" and shell.have_ipython(): runner = shell.run_ipython_kernel elif CONFIG["shell"] == "bpython" and shell.HAVE_BPYTHON: runner = shell.run_bpython_shell elif CONFIG["shell"] == "ptpython" and shell.HAVE_PTPYTHON: runner = shell.run_ptpython_shell elif CONFIG["shell"] == "ptipython" and shell.HAVE_PTIPYTHON: runner = shell.run_ptipython_shell elif CONFIG["shell"] == "classic": runner = shell.run_classic_shell else: try: if not shell.custom_shell_dict: # Only execfile once from os.path import expanduser cshell_fname = expanduser(CONFIG["shell"]) with open(cshell_fname) as inf: exec(compile(inf.read(), cshell_fname, "exec"), shell.custom_shell_dict, shell.custom_shell_dict) except Exception: print("Error when importing custom shell:") from traceback import print_exc print_exc() print("Falling back to classic shell") runner = shell.run_classic_shell else: if "pudb_shell" not in shell.custom_shell_dict: print("%s does not contain a function named pudb_shell at " "the module level." % CONFIG["shell"]) print("Falling back to classic shell") runner = shell.run_classic_shell else: runner = shell.custom_shell_dict["pudb_shell"] runner(curframe.f_globals, curframe.f_locals) self.screen.start() self.update_var_view() def run_cmdline(w, size, key): if CONFIG["shell"] == "internal": return toggle_cmdline_focus(w, size, key) else: return run_external_cmdline(w, size, key) def focus_code(w, size, key): self.columns.set_focus(self.lhs_col) self.lhs_col.set_focus(self.source_attr) class RHColumnFocuser: def __init__(self, idx): self.idx = idx def __call__(subself, w, size, key): # noqa # pylint: disable=no-self-argument self.columns.set_focus(self.rhs_col_sigwrap) self.rhs_col.set_focus(self.rhs_col.widget_list[subself.idx]) def quit(w, size, key): self.debugger.set_quit() end() def do_edit_config(w, size, key): self.run_edit_config() def redraw_screen(w, size, key): self.screen.clear() def help(pages): self.message(pages, title="PuDB - The Python Urwid Debugger") def edit_current_frame(w, size, key): _, pos = self.source.get_focus() source_identifier = \ self.source_code_provider.get_source_identifier() if source_identifier is None: self.message( "Cannot edit the current file--" "source code does not correspond to a file location. 
" "(perhaps this is generated code)") open_file_editor(source_identifier, pos+1) self.top.listen("o", show_output) self.top.listen("ctrl r", lambda w, size, key: reload_breakpoints_and_redisplay()) self.top.listen("!", run_cmdline) self.top.listen("e", show_traceback) self.top.listen("C", focus_code) self.top.listen("V", RHColumnFocuser(0)) self.top.listen("S", RHColumnFocuser(1)) self.top.listen("B", RHColumnFocuser(2)) self.top.listen("q", quit) self.top.listen("ctrl p", do_edit_config) self.top.listen("ctrl l", redraw_screen) self.top.listen("ctrl e", edit_current_frame) # }}} # {{{ setup want_curses_display = ( CONFIG["display"] == "curses" or ( CONFIG["display"] == "auto" and not ( os.environ.get("TERM", "").startswith("xterm") or os.environ.get("TERM", "").startswith("rxvt") ))) if (want_curses_display and not (stdin is not None or stdout is not None) and CursesScreen is not None): self.screen = ThreadsafeCursesScreen() else: screen_kwargs = {} if stdin is not None: screen_kwargs["input"] = stdin if stdout is not None: screen_kwargs["output"] = stdout if term_size is not None: screen_kwargs["term_size"] = term_size if screen_kwargs: self.screen = ThreadsafeFixedSizeRawScreen(**screen_kwargs) else: self.screen = ThreadsafeRawScreen() del want_curses_display if curses: try: curses.setupterm() except Exception: # Something went wrong--oh well. Nobody will die if their # 256 color support breaks. Just carry on without it. # https://github.com/inducer/pudb/issues/78 pass else: color_support = curses.tigetnum("colors") if color_support == 256 and isinstance(self.screen, RawScreen): self.screen.set_terminal_properties(256) self.setup_palette(self.screen) self.show_count = 0 self.source_code_provider = None self.current_line = None self.quit_event_loop = False # }}} # }}} # {{{ UI helpers def add_cmdline_content(self, s, attr): s = s.rstrip("\n") from pudb.ui_tools import SelectableText self.cmdline_contents.append( urwid.AttrMap(SelectableText(s), attr, "focused "+attr)) # scroll to end of last entry self.cmdline_list.set_focus_valign("bottom") self.cmdline_list.set_focus(len(self.cmdline_contents) - 1, coming_from="above") # Force the commandline to be visible self.set_cmdline_state(True) def reset_cmdline_size(self): self.lhs_col.item_types[-1] = "weight", \ self.cmdline_weight if self.cmdline_on else 0 def set_cmdline_size(self, weight=None): if weight is None: weight = self.cmdline_weight self.lhs_col.item_types[-1] = "weight", weight self.lhs_col._invalidate() def set_cmdline_state(self, state_on): if state_on != self.cmdline_on: self.cmdline_on = state_on self.set_cmdline_size(None if state_on else 0) def translate_ui_stack_index(self, index): # note: self-inverse if CONFIG["current_stack_frame"] == "top": return len(self.debugger.stack)-1-index elif CONFIG["current_stack_frame"] == "bottom": return index else: raise ValueError("invalid value for 'current_stack_frame' pref") def message(self, msg, title="Message", **kwargs): self.call_with_ui(self.dialog, urwid.ListBox(urwid.SimpleListWalker([urwid.Text(msg)])), [("OK", True)], title=title, **kwargs) def run_edit_config(self): from pudb.settings import edit_config, save_config edit_config(self, CONFIG) save_config(CONFIG) def dialog(self, content, buttons_and_results, title=None, bind_enter_esc=True, focus_buttons=False, extra_bindings=[]): class ResultSetter: def __init__(subself, res): # noqa: N805, E501 # pylint: disable=no-self-argument subself.res = res def __call__(subself, btn): # noqa: N805, E501 # pylint: 
disable=no-self-argument self.quit_event_loop = [subself.res] Attr = urwid.AttrMap # noqa if bind_enter_esc: content = SignalWrap(content) def enter(w, size, key): self.quit_event_loop = [True] def esc(w, size, key): self.quit_event_loop = [False] content.listen("enter", enter) content.listen("esc", esc) button_widgets = [] for btn_descr in buttons_and_results: if btn_descr is None: button_widgets.append(urwid.Text("")) else: btn_text, btn_result = btn_descr button_widgets.append( Attr(urwid.Button(btn_text, ResultSetter(btn_result)), "button", "focused button")) w = urwid.Columns([ content, ("fixed", 15, urwid.ListBox(urwid.SimpleListWalker(button_widgets))), ], dividechars=1) if focus_buttons: w.set_focus_column(1) if title is not None: w = urwid.Pile([ ("flow", urwid.AttrMap( urwid.Text(title, align="center"), "dialog title")), ("fixed", 1, urwid.SolidFill()), w]) class ResultSettingEventHandler: def __init__(subself, res): # noqa: N805, E501 # pylint: disable=no-self-argument subself.res = res def __call__(subself, w, size, key): # noqa: N805, E501 # pylint: disable=no-self-argument self.quit_event_loop = [subself.res] w = SignalWrap(w) for key, binding in extra_bindings: if isinstance(binding, str): w.listen(key, ResultSettingEventHandler(binding)) else: w.listen(key, binding) w = urwid.LineBox(w) w = urwid.Overlay(w, self.top, align="center", valign="middle", width=("relative", 75), height=("relative", 75), ) w = Attr(w, "background") return self.event_loop(w)[0] @staticmethod def setup_palette(screen): may_use_fancy_formats = not hasattr(urwid.escape, "_fg_attr_xterm") from pudb.theme import get_palette screen.register_palette( get_palette(may_use_fancy_formats, CONFIG["theme"])) def show_exception_dialog(self, exc_tuple): from traceback import format_exception desc = ( "The program has terminated abnormally because of an exception.\n\n" "A full traceback is below. You may recall this traceback at any " "time using the 'e' key. The debugger has entered post-mortem mode " "and will prevent further state changes." ) tb_txt = "".join(format_exception(*exc_tuple)) self._show_exception_dialog( description=desc, error_info=tb_txt, title="Program Terminated for Uncaught Exception", exit_loop_on_ok=True, ) def show_internal_exc_dlg(self, exc_tuple): try: self._show_internal_exc_dlg(exc_tuple) except Exception: ui_log.exception("Error while showing error dialog") def _show_internal_exc_dlg(self, exc_tuple): from traceback import format_exception from pudb import VERSION desc = ( "Pudb has encountered and safely caught an internal exception.\n\n" "The full traceback and some other information can be found " "below. 
Please report this information, along with details on " "what you were doing at the time the exception occurred, at: " "https://github.com/inducer/pudb/issues" ) error_info = ( "python version: {python}\n" "pudb version: {pudb}\n" "urwid version: {urwid}\n" "{tb}\n" ).format( python=sys.version.replace("\n", " "), pudb=VERSION, urwid=".".join(map(str, urwid.version.VERSION)), tb="".join(format_exception(*exc_tuple)) ) self._show_exception_dialog( description=desc, error_info=error_info, title="Pudb Internal Exception Encountered", ) def _show_exception_dialog(self, description, error_info, title, exit_loop_on_ok=False): res = self.dialog( urwid.ListBox(urwid.SimpleListWalker([urwid.Text( "\n\n".join([description, error_info]) )])), title=title, buttons_and_results=[ ("OK", exit_loop_on_ok), ("Save traceback", "save"), ], ) if res == "save": self._save_traceback(error_info) def _save_traceback(self, error_info): try: from os.path import exists filename = next( fname for n in count() for fname in ["traceback-%d.txt" % n if n else "traceback.txt"] if not exists(fname) ) with open(filename, "w") as outf: outf.write(error_info) self.message("Traceback saved as %s." % filename, title="Success") except Exception: from traceback import format_exception io_tb_txt = "".join(format_exception(*sys.exc_info())) self.message( "An error occurred while trying to write " "the traceback:\n\n" + io_tb_txt, title="I/O error") # }}} # {{{ UI enter/exit def show(self): if self.show_count == 0: self.screen.start() self.show_count += 1 def hide(self): self.show_count -= 1 if self.show_count == 0: self.screen.stop() def call_with_ui(self, f, *args, **kwargs): self.show() try: return f(*args, **kwargs) finally: self.hide() # }}} # {{{ interaction def event_loop(self, toplevel=None): prev_quit_loop = self.quit_event_loop try: import pygments # noqa except ImportError: if not hasattr(self, "pygments_message_shown"): self.pygments_message_shown = True self.message("Package 'pygments' not found. " "Syntax highlighting disabled.") WELCOME_LEVEL = "e039" # noqa if CONFIG["seen_welcome"] < WELCOME_LEVEL: CONFIG["seen_welcome"] = WELCOME_LEVEL from pudb import VERSION self.message("Welcome to PudB %s!\n\n" "PuDB is a full-screen, console-based visual debugger for " "Python. Its goal is to provide all the niceties of modern " "GUI-based debuggers in a more lightweight and " "keyboard-friendly package. " "PuDB allows you to debug code right where you write and test " "it--in a terminal. If you've worked with the excellent " "(but nowadays ancient) DOS-based Turbo Pascal or C tools, " "PuDB's UI might look familiar.\n\n" "If you're new here, welcome! The help screen " "(invoked by hitting '?' 
after this message) should get you " "on your way.\n" "\nChanges in version 2021.1:\n\n" "- Add shortcut to edit files in source and stack view " "(Gábor Vecsei)\n" "- Major improvements to the variable view " "(Michael van der Kamp)\n" "- Better internal error reporting (Michael van der Kamp)\n" "\nChanges in version 2020.1:\n\n" "- Add vi keys for the sidebar (Asbjørn Apeland)\n" "- Add -m command line switch (Elias Dorneles)\n" "- Debug forked processes (Jonathan Striebel)\n" "- Robustness and logging for internal errors " "(Michael Vanderkamp)\n" "- 'Reverse' remote debugging (jen6)\n" "\nChanges in version 2019.2:\n\n" "- Auto-hide the command line (Mark Blakeney)\n" "- Improve help and add jump to breakpoint (Mark Blakeney)\n" "- Drop Py2.6 support\n" "- Show callable attributes in var view\n" "- Allow scrolling sidebar with j/k\n" "- Fix setting breakpoints in Py3.8 (Aaron Meurer)\n" "\nChanges in version 2019.1:\n\n" "- Allow 'space' as a key to expand variables (Enrico Troeger)\n" "- Have a persistent setting on variable visibility \n" " (Enrico Troeger)\n" "- Enable/partially automate opening the debugger in another \n" " terminal (Anton Barkovsky)\n" "- Make sidebar scrollable with j/k (Clayton Craft)\n" "- Bug fixes.\n" "\nChanges in version 2018.1:\n\n" "- Bug fixes.\n" "\nChanges in version 2017.1.4:\n\n" "- Bug fixes.\n" "\nChanges in version 2017.1.3:\n\n" "- Add handling of safely_stringify_for_pudb to allow custom \n" " per-type stringification.\n" "- Add support for custom shells.\n" "- Better support for 2-wide characters in the var view.\n" "- Bug fixes.\n" "\nChanges in version 2017.1.2:\n\n" "- Bug fixes.\n" "\nChanges in version 2017.1.1:\n\n" "- IMPORTANT: 2017.1 and possibly earlier versions had a \n" " bug with exponential growth of shell history for the \n" " 'classic' shell, which (among other problems) could lead\n" " to slow startup of the classic shell. Check the file\n\n" " ~/.config/pudb/shell-history\n\n" " for size (and useful content) and delete/trim as needed.\n" "\nChanges in version 2017.1:\n\n" "- Many, many bug fixes (thank you to all who contributed!)\n" "\nChanges in version 2016.2:\n\n" "- UI improvements for disabled breakpoints.\n" "- Bug fixes.\n" "\nChanges in version 2016.1:\n\n" "- Fix module browser on Py3.\n" "\nChanges in version 2015.4:\n\n" "- Support for (somewhat rudimentary) remote debugging\n" " through a telnet connection.\n" "- Fix debugging of generated code in Python 3.\n" "\nChanges in version 2015.3:\n\n" "- Disable set_trace lines from the UI (Aaron Meurer)\n" "- Better control over attribute visibility (Ned Batchelder)\n" "\nChanges in version 2015.2:\n\n" "- ptpython support (P. 
Varet)\n" "- Improved rxvt support (Louper Rouch)\n" "- More keyboard shortcuts in the command line" "(Alex Sheluchin)\n" "\nChanges in version 2015.1:\n\n" "- Add solarized theme (Rinat Shigapov)\n" "- More keyboard shortcuts in the command line" "(Alexander Corwin)\n" "\nChanges in version 2014.1:\n\n" "- Make prompt-on-quit optional (Mike Burr)\n" "- Make tab completion in the built-in shell saner\n" "- Fix handling of unicode source\n" " (reported by Morten Nielsen and Buck Golemon)\n" "\nChanges in version 2013.5.1:\n\n" "- Fix loading of saved breakpoint conditions " "(Antoine Dechaume)\n" "- Fixes for built-in command line\n" "- Theme updates\n" "\nChanges in version 2013.5:\n\n" "- Add command line window\n" "- Uses curses display driver when appropriate\n" "\nChanges in version 2013.4:\n\n" "- Support for debugging generated code\n" "\nChanges in version 2013.3.5:\n\n" "- IPython fixes (Aaron Meurer)\n" "- Py2/3 configuration fixes (Somchai Smythe)\n" "- PyPy fixes (Julian Berman)\n" "\nChanges in version 2013.3.4:\n\n" "- Don't die if curses doesn't like what stdin/out are\n" " connected to.\n" "\nChanges in version 2013.3.3:\n\n" "- As soon as pudb is loaded, you can break to the debugger by\n" " evaluating the expression 'pu.db', where 'pu' is a new \n" " 'builtin' that pudb has rudely shoved into the interpreter.\n" "\nChanges in version 2013.3.2:\n\n" "- Don't attempt to do signal handling if a signal handler\n" " is already set (Fix by Buck Golemon).\n" "\nChanges in version 2013.3.1:\n\n" "- Don't ship {ez,distribute}_setup at all.\n" " It breaks more than it helps.\n" "\nChanges in version 2013.3:\n\n" "- Switch to setuptools as a setup helper.\n" "\nChanges in version 2013.2:\n\n" "- Even more bug fixes.\n" "\nChanges in version 2013.1:\n\n" "- Ctrl-C will now break to the debugger in a way that does\n" " not terminate the program\n" "- Lots of bugs fixed\n" "\nChanges in version 2012.3:\n\n" "- Python 3 support (contributed by Brad Froehle)\n" "- Better search box behavior (suggested by Ram Rachum)\n" "- Made it possible to go back and examine state from " "'finished' window. (suggested by Aaron Meurer)\n" "\nChanges in version 2012.2.1:\n\n" "- Don't touch config files during install.\n" "\nChanges in version 2012.2:\n\n" "- Add support for BPython as a shell.\n" "- You can now run 'python -m pudb script.py' on Py 2.6+.\n" " '-m pudb.run' still works--but it's four " "keystrokes longer! 
:)\n" "\nChanges in version 2012.1:\n\n" "- Work around an API change in IPython 0.12.\n" "\nChanges in version 2011.3.1:\n\n" "- Work-around for bug in urwid >= 1.0.\n" "\nChanges in version 2011.3:\n\n" "- Finer-grained string highlighting " "(contributed by Aaron Meurer)\n" "- Prefs tweaks, instant-apply, top-down stack " "(contributed by Aaron Meurer)\n" "- Size changes in sidebar boxes (contributed by Aaron Meurer)\n" "- New theme 'midnight' (contributed by Aaron Meurer)\n" "- Support for IPython 0.11 (contributed by Chris Farrow)\n" "- Suport for custom stringifiers " "(contributed by Aaron Meurer)\n" "- Line wrapping in variables view " "(contributed by Aaron Meurer)\n" "\nChanges in version 2011.2:\n\n" "- Fix for post-mortem debugging (contributed by 'Sundance')\n" "\nChanges in version 2011.1:\n\n" "- Breakpoints saved between sessions\n" "- A new 'dark vim' theme\n" "(both contributed by Naveen Michaud-Agrawal)\n" "\nChanges in version 0.93:\n\n" "- Stored preferences (no more pesky IPython prompt!)\n" "- Themes\n" "- Line numbers (optional)\n" % VERSION) from pudb.settings import save_config save_config(CONFIG) self.run_edit_config() try: if toplevel is None: toplevel = self.top self.size = self.screen.get_cols_rows() self.quit_event_loop = False while not self.quit_event_loop: canvas = toplevel.render(self.size, focus=True) self.screen.draw_screen(self.size, canvas) keys = self.screen.get_input() for k in keys: if k == "window resize": self.size = self.screen.get_cols_rows() else: try: toplevel.keypress(self.size, k) except Exception: self.show_internal_exc_dlg(sys.exc_info()) return self.quit_event_loop finally: self.quit_event_loop = prev_quit_loop # }}} # {{{ debugger-facing interface def interaction(self, exc_tuple, show_exc_dialog=True): self.current_exc_tuple = exc_tuple from pudb import VERSION caption = [(None, "PuDB %s - ?:help n:next s:step into b:breakpoint " "!:python command line" % VERSION)] if self.debugger.post_mortem: if show_exc_dialog and exc_tuple is not None: self.show_exception_dialog(exc_tuple) caption.extend([ (None, " "), ("warning", "[POST-MORTEM MODE]") ]) elif exc_tuple is not None: caption.extend([ (None, " "), ("warning", "[PROCESSING EXCEPTION - hit 'e' to examine]") ]) self.caption.set_text(caption) self.event_loop() def set_source_code_provider(self, source_code_provider, force_update=False): if self.source_code_provider != source_code_provider or force_update: self.source[:] = source_code_provider.get_lines(self) self.source_code_provider = source_code_provider self.current_line = None def show_line(self, line, source_code_provider=None): """Updates the UI so that a certain line is currently in view.""" changed_file = False if source_code_provider is not None: changed_file = self.source_code_provider != source_code_provider self.set_source_code_provider(source_code_provider) line -= 1 if line >= 0 and line < len(self.source): self.source_list.set_focus(line) if changed_file: self.source_list.set_focus_valign("middle") def set_current_line(self, line, source_code_provider): """Updates the UI to show the line currently being executed.""" if self.current_line is not None: self.current_line.set_current(False) self.show_line(line, source_code_provider) line -= 1 if line >= 0 and line < len(self.source): self.current_line = self.source[line] self.current_line.set_current(True) def update_var_view(self, locals=None, globals=None, focus_index=None): if locals is None: locals = self.debugger.curframe.f_locals if globals is None: globals = 
self.debugger.curframe.f_globals from pudb.var_view import make_var_view self.locals[:] = make_var_view( self.get_frame_var_info(read_only=True), locals, globals) if focus_index is not None: # Have to set the focus _after_ updating the locals list, as there # appears to be a brief moment while reseting the list when the # list is empty but urwid will attempt to set the focus anyway, # which causes problems. try: self.var_list._w.set_focus(focus_index) except IndexError: # sigh oh well we tried pass def _get_bp_list(self): return [bp for fn, bp_lst in self.debugger.get_all_breaks().items() for lineno in bp_lst for bp in self.debugger.get_breaks(fn, lineno) if not bp.temporary] def _format_fname(self, fname): from os.path import dirname, basename name = basename(fname) if name == "__init__.py": name = "..."+dirname(fname)[-10:]+"/"+name return name def update_breakpoints(self): self.bp_walker[:] = [ BreakpointFrame(self.debugger.current_bp == (bp.file, bp.line), self._format_fname(bp.file), bp) for bp in self._get_bp_list()] def update_stack(self): def make_frame_ui(frame_lineno): frame, lineno = frame_lineno code = frame.f_code class_name = None if code.co_argcount and code.co_varnames[0] == "self": try: class_name = frame.f_locals["self"].__class__.__name__ except Exception: from pudb.lowlevel import ui_log message = "Failed to determine class name" ui_log.exception(message) class_name = "!! %s !!" % message return StackFrame(frame is self.debugger.curframe, code.co_name, class_name, self._format_fname(code.co_filename), lineno) frame_uis = [make_frame_ui(fl) for fl in self.debugger.stack] if CONFIG["current_stack_frame"] == "top": frame_uis = frame_uis[::-1] elif CONFIG["current_stack_frame"] == "bottom": pass else: raise ValueError("invalid value for 'current_stack_frame' pref") self.stack_walker[:] = frame_uis def update_cmdline_win(self): self.set_cmdline_state(not CONFIG["hide_cmdline_win"]) # }}} # vim: foldmethod=marker:expandtab:softtabstop=4
pudb/debugger.py
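The changelog above points at the main ways to enter the debugger: running a script under "python -m pudb", calling set_trace() from code, or evaluating the injected pu.db builtin. A minimal sketch of those entry points, assuming pudb is installed; the script name is made up for illustration:

# From a shell (the changelog notes "python -m pudb script.py" works on Py 2.6+,
# and "python -m pudb.run" is the older, equivalent spelling):
#   python -m pudb my_script.py

# Or break programmatically at a specific line:
import pudb
pudb.set_trace()   # opens the PuDB UI here; press '?' inside for help

# Once pudb has been imported, evaluating the expression pu.db anywhere
# also drops into the debugger, per the 2013.3.3 changelog entry.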
import dis import math import os import unittest import sys import ast import _ast import tempfile import types import textwrap from test import support from test.support import script_helper, requires_debug_ranges from test.support.os_helper import FakePath class TestSpecifics(unittest.TestCase): def compile_single(self, source): compile(source, "<single>", "single") def assertInvalidSingle(self, source): self.assertRaises(SyntaxError, self.compile_single, source) def test_no_ending_newline(self): compile("hi", "<test>", "exec") compile("hi\r", "<test>", "exec") def test_empty(self): compile("", "<test>", "exec") def test_other_newlines(self): compile("\r\n", "<test>", "exec") compile("\r", "<test>", "exec") compile("hi\r\nstuff\r\ndef f():\n pass\r", "<test>", "exec") compile("this_is\rreally_old_mac\rdef f():\n pass", "<test>", "exec") def test_debug_assignment(self): # catch assignments to __debug__ self.assertRaises(SyntaxError, compile, '__debug__ = 1', '?', 'single') import builtins prev = builtins.__debug__ setattr(builtins, '__debug__', 'sure') self.assertEqual(__debug__, prev) setattr(builtins, '__debug__', prev) def test_argument_handling(self): # detect duplicate positional and keyword arguments self.assertRaises(SyntaxError, eval, 'lambda a,a:0') self.assertRaises(SyntaxError, eval, 'lambda a,a=1:0') self.assertRaises(SyntaxError, eval, 'lambda a=1,a=1:0') self.assertRaises(SyntaxError, exec, 'def f(a, a): pass') self.assertRaises(SyntaxError, exec, 'def f(a = 0, a = 1): pass') self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1') def test_syntax_error(self): self.assertRaises(SyntaxError, compile, "1+*3", "filename", "exec") def test_none_keyword_arg(self): self.assertRaises(SyntaxError, compile, "f(None=1)", "<string>", "exec") def test_duplicate_global_local(self): self.assertRaises(SyntaxError, exec, 'def f(a): global a; a = 1') def test_exec_with_general_mapping_for_locals(self): class M: "Test mapping interface versus possible calls from eval()." 
def __getitem__(self, key): if key == 'a': return 12 raise KeyError def __setitem__(self, key, value): self.results = (key, value) def keys(self): return list('xyz') m = M() g = globals() exec('z = a', g, m) self.assertEqual(m.results, ('z', 12)) try: exec('z = b', g, m) except NameError: pass else: self.fail('Did not detect a KeyError') exec('z = dir()', g, m) self.assertEqual(m.results, ('z', list('xyz'))) exec('z = globals()', g, m) self.assertEqual(m.results, ('z', g)) exec('z = locals()', g, m) self.assertEqual(m.results, ('z', m)) self.assertRaises(TypeError, exec, 'z = b', m) class A: "Non-mapping" pass m = A() self.assertRaises(TypeError, exec, 'z = a', g, m) # Verify that dict subclasses work as well class D(dict): def __getitem__(self, key): if key == 'a': return 12 return dict.__getitem__(self, key) d = D() exec('z = a', g, d) self.assertEqual(d['z'], 12) def test_extended_arg(self): longexpr = 'x = x or ' + '-x' * 2500 g = {} code = ''' def f(x): %s %s %s %s %s %s %s %s %s %s # the expressions above have no effect, x == argument while x: x -= 1 # EXTENDED_ARG/JUMP_ABSOLUTE here return x ''' % ((longexpr,)*10) exec(code, g) self.assertEqual(g['f'](5), 0) def test_argument_order(self): self.assertRaises(SyntaxError, exec, 'def f(a=1, b): pass') def test_float_literals(self): # testing bad float literals self.assertRaises(SyntaxError, eval, "2e") self.assertRaises(SyntaxError, eval, "2.0e+") self.assertRaises(SyntaxError, eval, "1e-") self.assertRaises(SyntaxError, eval, "3-4e/21") def test_indentation(self): # testing compile() of indented block w/o trailing newline" s = """ if 1: if 2: pass""" compile(s, "<string>", "exec") # This test is probably specific to CPython and may not generalize # to other implementations. We are trying to ensure that when # the first line of code starts after 256, correct line numbers # in tracebacks are still produced. def test_leading_newlines(self): s256 = "".join(["\n"] * 256 + ["spam"]) co = compile(s256, 'fn', 'exec') self.assertEqual(co.co_firstlineno, 1) self.assertEqual(list(co.co_lines()), [(0, 2, None), (2, 10, 257)]) def test_literals_with_leading_zeroes(self): for arg in ["077787", "0xj", "0x.", "0e", "090000000000000", "080000000000000", "000000000000009", "000000000000008", "0b42", "0BADCAFE", "0o123456789", "0b1.1", "0o4.2", "0b101j", "0o153j", "0b100e1", "0o777e1", "0777", "000777", "000000000000007"]: self.assertRaises(SyntaxError, eval, arg) self.assertEqual(eval("0xff"), 255) self.assertEqual(eval("0777."), 777) self.assertEqual(eval("0777.0"), 777) self.assertEqual(eval("000000000000000000000000000000000000000000000000000777e0"), 777) self.assertEqual(eval("0777e1"), 7770) self.assertEqual(eval("0e0"), 0) self.assertEqual(eval("0000e-012"), 0) self.assertEqual(eval("09.5"), 9.5) self.assertEqual(eval("0777j"), 777j) self.assertEqual(eval("000"), 0) self.assertEqual(eval("00j"), 0j) self.assertEqual(eval("00.0"), 0) self.assertEqual(eval("0e3"), 0) self.assertEqual(eval("090000000000000."), 90000000000000.) self.assertEqual(eval("090000000000000.0000000000000000000000"), 90000000000000.) self.assertEqual(eval("090000000000000e0"), 90000000000000.) self.assertEqual(eval("090000000000000e-0"), 90000000000000.) self.assertEqual(eval("090000000000000j"), 90000000000000j) self.assertEqual(eval("000000000000008."), 8.) self.assertEqual(eval("000000000000009."), 9.) 
self.assertEqual(eval("0b101010"), 42) self.assertEqual(eval("-0b000000000010"), -2) self.assertEqual(eval("0o777"), 511) self.assertEqual(eval("-0o0000010"), -8) def test_unary_minus(self): # Verify treatment of unary minus on negative numbers SF bug #660455 if sys.maxsize == 2147483647: # 32-bit machine all_one_bits = '0xffffffff' self.assertEqual(eval(all_one_bits), 4294967295) self.assertEqual(eval("-" + all_one_bits), -4294967295) elif sys.maxsize == 9223372036854775807: # 64-bit machine all_one_bits = '0xffffffffffffffff' self.assertEqual(eval(all_one_bits), 18446744073709551615) self.assertEqual(eval("-" + all_one_bits), -18446744073709551615) else: self.fail("How many bits *does* this machine have???") # Verify treatment of constant folding on -(sys.maxsize+1) # i.e. -2147483648 on 32 bit platforms. Should return int. self.assertIsInstance(eval("%s" % (-sys.maxsize - 1)), int) self.assertIsInstance(eval("%s" % (-sys.maxsize - 2)), int) if sys.maxsize == 9223372036854775807: def test_32_63_bit_values(self): a = +4294967296 # 1 << 32 b = -4294967296 # 1 << 32 c = +281474976710656 # 1 << 48 d = -281474976710656 # 1 << 48 e = +4611686018427387904 # 1 << 62 f = -4611686018427387904 # 1 << 62 g = +9223372036854775807 # 1 << 63 - 1 h = -9223372036854775807 # 1 << 63 - 1 for variable in self.test_32_63_bit_values.__code__.co_consts: if variable is not None: self.assertIsInstance(variable, int) def test_sequence_unpacking_error(self): # Verify sequence packing/unpacking with "or". SF bug #757818 i,j = (1, -1) or (-1, 1) self.assertEqual(i, 1) self.assertEqual(j, -1) def test_none_assignment(self): stmts = [ 'None = 0', 'None += 0', '__builtins__.None = 0', 'def None(): pass', 'class None: pass', '(a, None) = 0, 0', 'for None in range(10): pass', 'def f(None): pass', 'import None', 'import x as None', 'from x import None', 'from x import y as None' ] for stmt in stmts: stmt += "\n" self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'single') self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec') def test_import(self): succeed = [ 'import sys', 'import os, sys', 'import os as bar', 'import os.path as bar', 'from __future__ import nested_scopes, generators', 'from __future__ import (nested_scopes,\ngenerators)', 'from __future__ import (nested_scopes,\ngenerators,)', 'from sys import stdin, stderr, stdout', 'from sys import (stdin, stderr,\nstdout)', 'from sys import (stdin, stderr,\nstdout,)', 'from sys import (stdin\n, stderr, stdout)', 'from sys import (stdin\n, stderr, stdout,)', 'from sys import stdin as si, stdout as so, stderr as se', 'from sys import (stdin as si, stdout as so, stderr as se)', 'from sys import (stdin as si, stdout as so, stderr as se,)', ] fail = [ 'import (os, sys)', 'import (os), (sys)', 'import ((os), (sys))', 'import (sys', 'import sys)', 'import (os,)', 'import os As bar', 'import os.path a bar', 'from sys import stdin As stdout', 'from sys import stdin a stdout', 'from (sys) import stdin', 'from __future__ import (nested_scopes', 'from __future__ import nested_scopes)', 'from __future__ import nested_scopes,\ngenerators', 'from sys import (stdin', 'from sys import stdin)', 'from sys import stdin, stdout,\nstderr', 'from sys import stdin si', 'from sys import stdin,', 'from sys import (*)', 'from sys import (stdin,, stdout, stderr)', 'from sys import (stdin, stdout),', ] for stmt in succeed: compile(stmt, 'tmp', 'exec') for stmt in fail: self.assertRaises(SyntaxError, compile, stmt, 'tmp', 'exec') def test_for_distinct_code_objects(self): # SF bug 1048870 
def f(): f1 = lambda x=1: x f2 = lambda x=2: x return f1, f2 f1, f2 = f() self.assertNotEqual(id(f1.__code__), id(f2.__code__)) def test_lambda_doc(self): l = lambda: "foo" self.assertIsNone(l.__doc__) def test_encoding(self): code = b'# -*- coding: badencoding -*-\npass\n' self.assertRaises(SyntaxError, compile, code, 'tmp', 'exec') code = '# -*- coding: badencoding -*-\n"\xc2\xa4"\n' compile(code, 'tmp', 'exec') self.assertEqual(eval(code), '\xc2\xa4') code = '"\xc2\xa4"\n' self.assertEqual(eval(code), '\xc2\xa4') code = b'"\xc2\xa4"\n' self.assertEqual(eval(code), '\xa4') code = b'# -*- coding: latin1 -*-\n"\xc2\xa4"\n' self.assertEqual(eval(code), '\xc2\xa4') code = b'# -*- coding: utf-8 -*-\n"\xc2\xa4"\n' self.assertEqual(eval(code), '\xa4') code = b'# -*- coding: iso8859-15 -*-\n"\xc2\xa4"\n' self.assertEqual(eval(code), '\xc2\u20ac') code = '"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n' self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xc2\xa4') code = b'"""\\\n# -*- coding: iso8859-15 -*-\n\xc2\xa4"""\n' self.assertEqual(eval(code), '# -*- coding: iso8859-15 -*-\n\xa4') def test_subscripts(self): # SF bug 1448804 # Class to make testing subscript results easy class str_map(object): def __init__(self): self.data = {} def __getitem__(self, key): return self.data[str(key)] def __setitem__(self, key, value): self.data[str(key)] = value def __delitem__(self, key): del self.data[str(key)] def __contains__(self, key): return str(key) in self.data d = str_map() # Index d[1] = 1 self.assertEqual(d[1], 1) d[1] += 1 self.assertEqual(d[1], 2) del d[1] self.assertNotIn(1, d) # Tuple of indices d[1, 1] = 1 self.assertEqual(d[1, 1], 1) d[1, 1] += 1 self.assertEqual(d[1, 1], 2) del d[1, 1] self.assertNotIn((1, 1), d) # Simple slice d[1:2] = 1 self.assertEqual(d[1:2], 1) d[1:2] += 1 self.assertEqual(d[1:2], 2) del d[1:2] self.assertNotIn(slice(1, 2), d) # Tuple of simple slices d[1:2, 1:2] = 1 self.assertEqual(d[1:2, 1:2], 1) d[1:2, 1:2] += 1 self.assertEqual(d[1:2, 1:2], 2) del d[1:2, 1:2] self.assertNotIn((slice(1, 2), slice(1, 2)), d) # Extended slice d[1:2:3] = 1 self.assertEqual(d[1:2:3], 1) d[1:2:3] += 1 self.assertEqual(d[1:2:3], 2) del d[1:2:3] self.assertNotIn(slice(1, 2, 3), d) # Tuple of extended slices d[1:2:3, 1:2:3] = 1 self.assertEqual(d[1:2:3, 1:2:3], 1) d[1:2:3, 1:2:3] += 1 self.assertEqual(d[1:2:3, 1:2:3], 2) del d[1:2:3, 1:2:3] self.assertNotIn((slice(1, 2, 3), slice(1, 2, 3)), d) # Ellipsis d[...] = 1 self.assertEqual(d[...], 1) d[...] += 1 self.assertEqual(d[...], 2) del d[...] self.assertNotIn(Ellipsis, d) # Tuple of Ellipses d[..., ...] = 1 self.assertEqual(d[..., ...], 1) d[..., ...] += 1 self.assertEqual(d[..., ...], 2) del d[..., ...] 
self.assertNotIn((Ellipsis, Ellipsis), d) def test_annotation_limit(self): # more than 255 annotations, should compile ok s = "def f(%s): pass" s %= ', '.join('a%d:%d' % (i,i) for i in range(300)) compile(s, '?', 'exec') def test_mangling(self): class A: def f(): __mangled = 1 __not_mangled__ = 2 import __mangled_mod import __package__.module self.assertIn("_A__mangled", A.f.__code__.co_varnames) self.assertIn("__not_mangled__", A.f.__code__.co_varnames) self.assertIn("_A__mangled_mod", A.f.__code__.co_varnames) self.assertIn("__package__", A.f.__code__.co_varnames) def test_compile_ast(self): fname = __file__ if fname.lower().endswith('pyc'): fname = fname[:-1] with open(fname, encoding='utf-8') as f: fcontents = f.read() sample_code = [ ['<assign>', 'x = 5'], ['<ifblock>', """if True:\n pass\n"""], ['<forblock>', """for n in [1, 2, 3]:\n print(n)\n"""], ['<deffunc>', """def foo():\n pass\nfoo()\n"""], [fname, fcontents], ] for fname, code in sample_code: co1 = compile(code, '%s1' % fname, 'exec') ast = compile(code, '%s2' % fname, 'exec', _ast.PyCF_ONLY_AST) self.assertTrue(type(ast) == _ast.Module) co2 = compile(ast, '%s3' % fname, 'exec') self.assertEqual(co1, co2) # the code object's filename comes from the second compilation step self.assertEqual(co2.co_filename, '%s3' % fname) # raise exception when node type doesn't match with compile mode co1 = compile('print(1)', '<string>', 'exec', _ast.PyCF_ONLY_AST) self.assertRaises(TypeError, compile, co1, '<ast>', 'eval') # raise exception when node type is no start node self.assertRaises(TypeError, compile, _ast.If(), '<ast>', 'exec') # raise exception when node has invalid children ast = _ast.Module() ast.body = [_ast.BoolOp()] self.assertRaises(TypeError, compile, ast, '<ast>', 'exec') def test_dict_evaluation_order(self): i = 0 def f(): nonlocal i i += 1 return i d = {f(): f(), f(): f()} self.assertEqual(d, {1: 2, 3: 4}) def test_compile_filename(self): for filename in 'file.py', b'file.py': code = compile('pass', filename, 'exec') self.assertEqual(code.co_filename, 'file.py') for filename in bytearray(b'file.py'), memoryview(b'file.py'): with self.assertWarns(DeprecationWarning): code = compile('pass', filename, 'exec') self.assertEqual(code.co_filename, 'file.py') self.assertRaises(TypeError, compile, 'pass', list(b'file.py'), 'exec') @support.cpython_only def test_same_filename_used(self): s = """def f(): pass\ndef g(): pass""" c = compile(s, "myfile", "exec") for obj in c.co_consts: if isinstance(obj, types.CodeType): self.assertIs(obj.co_filename, c.co_filename) def test_single_statement(self): self.compile_single("1 + 2") self.compile_single("\n1 + 2") self.compile_single("1 + 2\n") self.compile_single("1 + 2\n\n") self.compile_single("1 + 2\t\t\n") self.compile_single("1 + 2\t\t\n ") self.compile_single("1 + 2 # one plus two") self.compile_single("1; 2") self.compile_single("import sys; sys") self.compile_single("def f():\n pass") self.compile_single("while False:\n pass") self.compile_single("if x:\n f(x)") self.compile_single("if x:\n f(x)\nelse:\n g(x)") self.compile_single("class T:\n pass") self.compile_single("c = '''\na=1\nb=2\nc=3\n'''") def test_bad_single_statement(self): self.assertInvalidSingle('1\n2') self.assertInvalidSingle('def f(): pass') self.assertInvalidSingle('a = 13\nb = 187') self.assertInvalidSingle('del x\ndel y') self.assertInvalidSingle('f()\ng()') self.assertInvalidSingle('f()\n# blah\nblah()') self.assertInvalidSingle('f()\nxy # blah\nblah()') self.assertInvalidSingle('x = 5 # comment\nx = 6\n') 
self.assertInvalidSingle("c = '''\nd=1\n'''\na = 1\n\nb = 2\n") def test_particularly_evil_undecodable(self): # Issue 24022 src = b'0000\x00\n00000000000\n\x00\n\x9e\n' with tempfile.TemporaryDirectory() as tmpd: fn = os.path.join(tmpd, "bad.py") with open(fn, "wb") as fp: fp.write(src) res = script_helper.run_python_until_end(fn)[0] self.assertIn(b"Non-UTF-8", res.err) def test_yet_more_evil_still_undecodable(self): # Issue #25388 src = b"#\x00\n#\xfd\n" with tempfile.TemporaryDirectory() as tmpd: fn = os.path.join(tmpd, "bad.py") with open(fn, "wb") as fp: fp.write(src) res = script_helper.run_python_until_end(fn)[0] self.assertIn(b"Non-UTF-8", res.err) @support.cpython_only def test_compiler_recursion_limit(self): # Expected limit is sys.getrecursionlimit() * the scaling factor # in symtable.c (currently 3) # We expect to fail *at* that limit, because we use up some of # the stack depth limit in the test suite code # So we check the expected limit and 75% of that # XXX (ncoghlan): duplicating the scaling factor here is a little # ugly. Perhaps it should be exposed somewhere... fail_depth = sys.getrecursionlimit() * 3 crash_depth = sys.getrecursionlimit() * 300 success_depth = int(fail_depth * 0.75) def check_limit(prefix, repeated, mode="single"): expect_ok = prefix + repeated * success_depth compile(expect_ok, '<test>', mode) for depth in (fail_depth, crash_depth): broken = prefix + repeated * depth details = "Compiling ({!r} + {!r} * {})".format( prefix, repeated, depth) with self.assertRaises(RecursionError, msg=details): compile(broken, '<test>', mode) check_limit("a", "()") check_limit("a", ".b") check_limit("a", "[0]") check_limit("a", "*a") # XXX Crashes in the parser. # check_limit("a", " if a else a") # check_limit("if a: pass", "\nelif a: pass", mode="exec") def test_null_terminated(self): # The source code is null-terminated internally, but bytes-like # objects are accepted, which could be not terminated. with self.assertRaisesRegex(ValueError, "cannot contain null"): compile("123\x00", "<dummy>", "eval") with self.assertRaisesRegex(ValueError, "cannot contain null"): compile(memoryview(b"123\x00"), "<dummy>", "eval") code = compile(memoryview(b"123\x00")[1:-1], "<dummy>", "eval") self.assertEqual(eval(code), 23) code = compile(memoryview(b"1234")[1:-1], "<dummy>", "eval") self.assertEqual(eval(code), 23) code = compile(memoryview(b"$23$")[1:-1], "<dummy>", "eval") self.assertEqual(eval(code), 23) # Also test when eval() and exec() do the compilation step self.assertEqual(eval(memoryview(b"1234")[1:-1]), 23) namespace = dict() exec(memoryview(b"ax = 123")[1:-1], namespace) self.assertEqual(namespace['x'], 12) def check_constant(self, func, expected): for const in func.__code__.co_consts: if repr(const) == repr(expected): break else: self.fail("unable to find constant %r in %r" % (expected, func.__code__.co_consts)) # Merging equal constants is not a strict requirement for the Python # semantics, it's a more an implementation detail. @support.cpython_only def test_merge_constants(self): # Issue #25843: compile() must merge constants which are equal # and have the same type. def check_same_constant(const): ns = {} code = "f1, f2 = lambda: %r, lambda: %r" % (const, const) exec(code, ns) f1 = ns['f1'] f2 = ns['f2'] self.assertIs(f1.__code__, f2.__code__) self.check_constant(f1, const) self.assertEqual(repr(f1()), repr(const)) check_same_constant(None) check_same_constant(0) check_same_constant(0.0) check_same_constant(b'abc') check_same_constant('abc') # Note: "lambda: ..." 
emits "LOAD_CONST Ellipsis", # whereas "lambda: Ellipsis" emits "LOAD_GLOBAL Ellipsis" f1, f2 = lambda: ..., lambda: ... self.assertIs(f1.__code__, f2.__code__) self.check_constant(f1, Ellipsis) self.assertEqual(repr(f1()), repr(Ellipsis)) # Merge constants in tuple or frozenset f1, f2 = lambda: "not a name", lambda: ("not a name",) f3 = lambda x: x in {("not a name",)} self.assertIs(f1.__code__.co_consts[1], f2.__code__.co_consts[1][0]) self.assertIs(next(iter(f3.__code__.co_consts[1])), f2.__code__.co_consts[1]) # {0} is converted to a constant frozenset({0}) by the peephole # optimizer f1, f2 = lambda x: x in {0}, lambda x: x in {0} self.assertIs(f1.__code__, f2.__code__) self.check_constant(f1, frozenset({0})) self.assertTrue(f1(0)) # Merging equal co_linetable and co_code is not a strict requirement # for the Python semantics, it's a more an implementation detail. @support.cpython_only def test_merge_code_attrs(self): # See https://bugs.python.org/issue42217 f1 = lambda x: x.y.z f2 = lambda a: a.b.c self.assertIs(f1.__code__.co_linetable, f2.__code__.co_linetable) self.assertIs(f1.__code__.co_code, f2.__code__.co_code) # Stripping unused constants is not a strict requirement for the # Python semantics, it's a more an implementation detail. @support.cpython_only def test_strip_unused_consts(self): # Python 3.10rc1 appended None to co_consts when None is not used # at all. See bpo-45056. def f1(): "docstring" return 42 self.assertEqual(f1.__code__.co_consts, ("docstring", 42)) # This is a regression test for a CPython specific peephole optimizer # implementation bug present in a few releases. It's assertion verifies # that peephole optimization was actually done though that isn't an # indication of the bugs presence or not (crashing is). @support.cpython_only def test_peephole_opt_unreachable_code_array_access_in_bounds(self): """Regression test for issue35193 when run under clang msan.""" def unused_code_at_end(): return 3 raise RuntimeError("unreachable") # The above function definition will trigger the out of bounds # bug in the peephole optimizer as it scans opcodes past the # RETURN_VALUE opcode. This does not always crash an interpreter. # When you build with the clang memory sanitizer it reliably aborts. self.assertEqual( 'RETURN_VALUE', list(dis.get_instructions(unused_code_at_end))[-1].opname) def test_dont_merge_constants(self): # Issue #25843: compile() must not merge constants which are equal # but have a different type. def check_different_constants(const1, const2): ns = {} exec("f1, f2 = lambda: %r, lambda: %r" % (const1, const2), ns) f1 = ns['f1'] f2 = ns['f2'] self.assertIsNot(f1.__code__, f2.__code__) self.assertNotEqual(f1.__code__, f2.__code__) self.check_constant(f1, const1) self.check_constant(f2, const2) self.assertEqual(repr(f1()), repr(const1)) self.assertEqual(repr(f2()), repr(const2)) check_different_constants(0, 0.0) check_different_constants(+0.0, -0.0) check_different_constants((0,), (0.0,)) check_different_constants('a', b'a') check_different_constants(('a',), (b'a',)) # check_different_constants() cannot be used because repr(-0j) is # '(-0-0j)', but when '(-0-0j)' is evaluated to 0j: we loose the sign. 
f1, f2 = lambda: +0.0j, lambda: -0.0j self.assertIsNot(f1.__code__, f2.__code__) self.check_constant(f1, +0.0j) self.check_constant(f2, -0.0j) self.assertEqual(repr(f1()), repr(+0.0j)) self.assertEqual(repr(f2()), repr(-0.0j)) # {0} is converted to a constant frozenset({0}) by the peephole # optimizer f1, f2 = lambda x: x in {0}, lambda x: x in {0.0} self.assertIsNot(f1.__code__, f2.__code__) self.check_constant(f1, frozenset({0})) self.check_constant(f2, frozenset({0.0})) self.assertTrue(f1(0)) self.assertTrue(f2(0.0)) def test_path_like_objects(self): # An implicit test for PyUnicode_FSDecoder(). compile("42", FakePath("test_compile_pathlike"), "single") def test_stack_overflow(self): # bpo-31113: Stack overflow when compile a long sequence of # complex statements. compile("if a: b\n" * 200000, "<dummy>", "exec") # Multiple users rely on the fact that CPython does not generate # bytecode for dead code blocks. See bpo-37500 for more context. @support.cpython_only def test_dead_blocks_do_not_generate_bytecode(self): def unused_block_if(): if 0: return 42 def unused_block_while(): while 0: return 42 def unused_block_if_else(): if 1: return None else: return 42 def unused_block_while_else(): while 1: return None else: return 42 funcs = [unused_block_if, unused_block_while, unused_block_if_else, unused_block_while_else] for func in funcs: opcodes = list(dis.get_instructions(func)) self.assertLessEqual(len(opcodes), 4) self.assertEqual('LOAD_CONST', opcodes[-2].opname) self.assertEqual(None, opcodes[-2].argval) self.assertEqual('RETURN_VALUE', opcodes[-1].opname) def test_false_while_loop(self): def break_in_while(): while False: break def continue_in_while(): while False: continue funcs = [break_in_while, continue_in_while] # Check that we did not raise but we also don't generate bytecode for func in funcs: opcodes = list(dis.get_instructions(func)) self.assertEqual(3, len(opcodes)) self.assertEqual('LOAD_CONST', opcodes[1].opname) self.assertEqual(None, opcodes[1].argval) self.assertEqual('RETURN_VALUE', opcodes[2].opname) def test_consts_in_conditionals(self): def and_true(x): return True and x def and_false(x): return False and x def or_true(x): return True or x def or_false(x): return False or x funcs = [and_true, and_false, or_true, or_false] # Check that condition is removed. 
for func in funcs: with self.subTest(func=func): opcodes = list(dis.get_instructions(func)) self.assertLessEqual(len(opcodes), 3) self.assertIn('LOAD_', opcodes[-2].opname) self.assertEqual('RETURN_VALUE', opcodes[-1].opname) def test_imported_load_method(self): sources = [ """\ import os def foo(): return os.uname() """, """\ import os as operating_system def foo(): return operating_system.uname() """, """\ from os import path def foo(x): return path.join(x) """, """\ from os import path as os_path def foo(x): return os_path.join(x) """ ] for source in sources: namespace = {} exec(textwrap.dedent(source), namespace) func = namespace['foo'] with self.subTest(func=func.__name__): opcodes = list(dis.get_instructions(func)) instructions = [opcode.opname for opcode in opcodes] self.assertNotIn('LOAD_METHOD', instructions) self.assertIn('LOAD_ATTR', instructions) self.assertIn('PRECALL', instructions) def test_lineno_procedure_call(self): def call(): ( print() ) line1 = call.__code__.co_firstlineno + 1 assert line1 not in [line for (_, _, line) in call.__code__.co_lines()] def test_lineno_after_implicit_return(self): TRUE = True # Don't use constant True or False, as compiler will remove test def if1(x): x() if TRUE: pass def if2(x): x() if TRUE: pass else: pass def if3(x): x() if TRUE: pass else: return None def if4(x): x() if not TRUE: pass funcs = [ if1, if2, if3, if4] lastlines = [ 3, 3, 3, 2] frame = None def save_caller_frame(): nonlocal frame frame = sys._getframe(1) for func, lastline in zip(funcs, lastlines, strict=True): with self.subTest(func=func): func(save_caller_frame) self.assertEqual(frame.f_lineno-frame.f_code.co_firstlineno, lastline) def test_lineno_after_no_code(self): def no_code1(): "doc string" def no_code2(): a: int for func in (no_code1, no_code2): with self.subTest(func=func): code = func.__code__ lines = list(code.co_lines()) self.assertEqual(len(lines), 1) start, end, line = lines[0] self.assertEqual(start, 0) self.assertEqual(end, len(code.co_code)) self.assertEqual(line, code.co_firstlineno) def test_lineno_attribute(self): def load_attr(): return ( o. a ) load_attr_lines = [ 0, 2, 3, 1 ] def load_method(): return ( o. m( 0 ) ) load_method_lines = [ 0, 2, 3, 4, 3, 1 ] def store_attr(): ( o. a ) = ( v ) store_attr_lines = [ 0, 5, 2, 3 ] def aug_store_attr(): ( o. 
a ) += ( v ) aug_store_attr_lines = [ 0, 2, 3, 5, 1, 3 ] funcs = [ load_attr, load_method, store_attr, aug_store_attr] func_lines = [ load_attr_lines, load_method_lines, store_attr_lines, aug_store_attr_lines] for func, lines in zip(funcs, func_lines, strict=True): with self.subTest(func=func): code_lines = [ line-func.__code__.co_firstlineno for (_, _, line) in func.__code__.co_lines() if line is not None ] self.assertEqual(lines, code_lines) def test_line_number_genexp(self): def return_genexp(): return (1 for x in y) genexp_lines = [1, 3, 1] genexp_code = return_genexp.__code__.co_consts[1] code_lines = [ None if line is None else line-return_genexp.__code__.co_firstlineno for (_, _, line) in genexp_code.co_lines() ] self.assertEqual(genexp_lines, code_lines) def test_line_number_implicit_return_after_async_for(self): async def test(aseq): async for i in aseq: body expected_lines = [0, 1, 2, 1] code_lines = [ None if line is None else line-test.__code__.co_firstlineno for (_, _, line) in test.__code__.co_lines() ] self.assertEqual(expected_lines, code_lines) def test_big_dict_literal(self): # The compiler has a flushing point in "compiler_dict" that calls compiles # a portion of the dictionary literal when the loop that iterates over the items # reaches 0xFFFF elements but the code was not including the boundary element, # dropping the key at position 0xFFFF. See bpo-41531 for more information dict_size = 0xFFFF + 1 the_dict = "{" + ",".join(f"{x}:{x}" for x in range(dict_size)) + "}" self.assertEqual(len(eval(the_dict)), dict_size) def test_redundant_jump_in_if_else_break(self): # Check if bytecode containing jumps that simply point to the next line # is generated around if-else-break style structures. See bpo-42615. def if_else_break(): val = 1 while True: if val > 0: val -= 1 else: break val = -1 INSTR_SIZE = 2 HANDLED_JUMPS = ( 'POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE', 'JUMP_ABSOLUTE', 'JUMP_FORWARD', ) for line, instr in enumerate( dis.Bytecode(if_else_break, show_caches=True) ): if instr.opname == 'JUMP_FORWARD': self.assertNotEqual(instr.arg, 0) elif instr.opname in HANDLED_JUMPS: self.assertNotEqual(instr.arg, (line + 1)*INSTR_SIZE) def test_no_wraparound_jump(self): # See https://bugs.python.org/issue46724 def while_not_chained(a, b, c): while not (a < b < c): pass for instr in dis.Bytecode(while_not_chained): self.assertNotEqual(instr.opname, "EXTENDED_ARG") @requires_debug_ranges() class TestSourcePositions(unittest.TestCase): # Ensure that compiled code snippets have correct line and column numbers # in `co_positions()`. def check_positions_against_ast(self, snippet): # Basic check that makes sure each line and column is at least present # in one of the AST nodes of the source code. code = compile(snippet, 'test_compile.py', 'exec') ast_tree = compile(snippet, 'test_compile.py', 'exec', _ast.PyCF_ONLY_AST) self.assertTrue(type(ast_tree) == _ast.Module) # Use an AST visitor that notes all the offsets. lines, end_lines, columns, end_columns = set(), set(), set(), set() class SourceOffsetVisitor(ast.NodeVisitor): def generic_visit(self, node): super().generic_visit(node) if not isinstance(node, ast.expr) and not isinstance(node, ast.stmt): return lines.add(node.lineno) end_lines.add(node.end_lineno) columns.add(node.col_offset) end_columns.add(node.end_col_offset) SourceOffsetVisitor().visit(ast_tree) # Check against the positions in the code object. 
for (line, end_line, col, end_col) in code.co_positions(): # If the offset is not None (indicating missing data), ensure that # it was part of one of the AST nodes. if line is not None: self.assertIn(line, lines) if end_line is not None: self.assertIn(end_line, end_lines) if col is not None: self.assertIn(col, columns) if end_col is not None: self.assertIn(end_col, end_columns) return code, ast_tree def assertOpcodeSourcePositionIs(self, code, opcode, line, end_line, column, end_column, occurrence=1): for instr, position in zip( dis.Bytecode(code, show_caches=True), code.co_positions(), strict=True ): if instr.opname == opcode: occurrence -= 1 if not occurrence: self.assertEqual(position[0], line) self.assertEqual(position[1], end_line) self.assertEqual(position[2], column) self.assertEqual(position[3], end_column) return self.fail(f"Opcode {opcode} not found in code") def test_simple_assignment(self): snippet = "x = 1" self.check_positions_against_ast(snippet) def test_compiles_to_extended_op_arg(self): # Make sure we still have valid positions when the code compiles to an # EXTENDED_ARG by performing a loop which needs a JUMP_ABSOLUTE after # a bunch of opcodes. snippet = "x = x\n" * 10_000 snippet += ("while x != 0:\n" " x -= 1\n" "while x != 0:\n" " x += 1\n" ) compiled_code, _ = self.check_positions_against_ast(snippet) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP', line=10_000 + 2, end_line=10_000 + 2, column=2, end_column=8, occurrence=1) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP', line=10_000 + 4, end_line=10_000 + 4, column=2, end_column=9, occurrence=2) def test_multiline_expression(self): snippet = """\ f( 1, 2, 3, 4 ) """ compiled_code, _ = self.check_positions_against_ast(snippet) self.assertOpcodeSourcePositionIs(compiled_code, 'CALL', line=1, end_line=3, column=0, end_column=1) def test_very_long_line_end_offset(self): # Make sure we get None for when the column offset is too large to # store in a byte. long_string = "a" * 1000 snippet = f"g('{long_string}')" compiled_code, _ = self.check_positions_against_ast(snippet) self.assertOpcodeSourcePositionIs(compiled_code, 'CALL', line=1, end_line=1, column=None, end_column=None) def test_complex_single_line_expression(self): snippet = "a - b @ (c * x['key'] + 23)" compiled_code, _ = self.check_positions_against_ast(snippet) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_SUBSCR', line=1, end_line=1, column=13, end_column=21) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP', line=1, end_line=1, column=9, end_column=21, occurrence=1) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP', line=1, end_line=1, column=9, end_column=26, occurrence=2) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP', line=1, end_line=1, column=4, end_column=27, occurrence=3) self.assertOpcodeSourcePositionIs(compiled_code, 'BINARY_OP', line=1, end_line=1, column=0, end_column=27, occurrence=4) class TestExpressionStackSize(unittest.TestCase): # These tests check that the computed stack size for a code object # stays within reasonable bounds (see issue #21523 for an example # dysfunction). N = 100 def check_stack_size(self, code): # To assert that the alleged stack size is not O(N), we # check that it is smaller than log(N). 
if isinstance(code, str): code = compile(code, "<foo>", "single") max_size = math.ceil(math.log(len(code.co_code))) self.assertLessEqual(code.co_stacksize, max_size) def test_and(self): self.check_stack_size("x and " * self.N + "x") def test_or(self): self.check_stack_size("x or " * self.N + "x") def test_and_or(self): self.check_stack_size("x and x or " * self.N + "x") def test_chained_comparison(self): self.check_stack_size("x < " * self.N + "x") def test_if_else(self): self.check_stack_size("x if x else " * self.N + "x") def test_binop(self): self.check_stack_size("x + " * self.N + "x") def test_list(self): self.check_stack_size("[" + "x, " * self.N + "x]") def test_tuple(self): self.check_stack_size("(" + "x, " * self.N + "x)") def test_set(self): self.check_stack_size("{" + "x, " * self.N + "x}") def test_dict(self): self.check_stack_size("{" + "x:x, " * self.N + "x:x}") def test_func_args(self): self.check_stack_size("f(" + "x, " * self.N + ")") def test_func_kwargs(self): kwargs = (f'a{i}=x' for i in range(self.N)) self.check_stack_size("f(" + ", ".join(kwargs) + ")") def test_func_args(self): self.check_stack_size("o.m(" + "x, " * self.N + ")") def test_meth_kwargs(self): kwargs = (f'a{i}=x' for i in range(self.N)) self.check_stack_size("o.m(" + ", ".join(kwargs) + ")") def test_func_and(self): code = "def f(x):\n" code += " x and x\n" * self.N self.check_stack_size(code) class TestStackSizeStability(unittest.TestCase): # Check that repeating certain snippets doesn't increase the stack size # beyond what a single snippet requires. def check_stack_size(self, snippet, async_=False): def compile_snippet(i): ns = {} script = """def func():\n""" + i * snippet if async_: script = "async " + script code = compile(script, "<script>", "exec") exec(code, ns, ns) return ns['func'].__code__ sizes = [compile_snippet(i).co_stacksize for i in range(2, 5)] if len(set(sizes)) != 1: import dis, io out = io.StringIO() dis.dis(compile_snippet(1), file=out) self.fail("stack sizes diverge with # of consecutive snippets: " "%s\n%s\n%s" % (sizes, snippet, out.getvalue())) def test_if(self): snippet = """ if x: a """ self.check_stack_size(snippet) def test_if_else(self): snippet = """ if x: a elif y: b else: c """ self.check_stack_size(snippet) def test_try_except_bare(self): snippet = """ try: a except: b """ self.check_stack_size(snippet) def test_try_except_qualified(self): snippet = """ try: a except ImportError: b except: c else: d """ self.check_stack_size(snippet) def test_try_except_as(self): snippet = """ try: a except ImportError as e: b except: c else: d """ self.check_stack_size(snippet) def test_try_except_star_qualified(self): snippet = """ try: a except* ImportError: b else: c """ self.check_stack_size(snippet) def test_try_except_star_as(self): snippet = """ try: a except* ImportError as e: b else: c """ self.check_stack_size(snippet) def test_try_except_star_finally(self): snippet = """ try: a except* A: b finally: c """ self.check_stack_size(snippet) def test_try_finally(self): snippet = """ try: a finally: b """ self.check_stack_size(snippet) def test_with(self): snippet = """ with x as y: a """ self.check_stack_size(snippet) def test_while_else(self): snippet = """ while x: a else: b """ self.check_stack_size(snippet) def test_for(self): snippet = """ for x in y: a """ self.check_stack_size(snippet) def test_for_else(self): snippet = """ for x in y: a else: b """ self.check_stack_size(snippet) def test_for_break_continue(self): snippet = """ for x in y: if z: break elif u: continue 
else: a else: b """ self.check_stack_size(snippet) def test_for_break_continue_inside_try_finally_block(self): snippet = """ for x in y: try: if z: break elif u: continue else: a finally: f else: b """ self.check_stack_size(snippet) def test_for_break_continue_inside_finally_block(self): snippet = """ for x in y: try: t finally: if z: break elif u: continue else: a else: b """ self.check_stack_size(snippet) def test_for_break_continue_inside_except_block(self): snippet = """ for x in y: try: t except: if z: break elif u: continue else: a else: b """ self.check_stack_size(snippet) def test_for_break_continue_inside_with_block(self): snippet = """ for x in y: with c: if z: break elif u: continue else: a else: b """ self.check_stack_size(snippet) def test_return_inside_try_finally_block(self): snippet = """ try: if z: return else: a finally: f """ self.check_stack_size(snippet) def test_return_inside_finally_block(self): snippet = """ try: t finally: if z: return else: a """ self.check_stack_size(snippet) def test_return_inside_except_block(self): snippet = """ try: t except: if z: return else: a """ self.check_stack_size(snippet) def test_return_inside_with_block(self): snippet = """ with c: if z: return else: a """ self.check_stack_size(snippet) def test_async_with(self): snippet = """ async with x as y: a """ self.check_stack_size(snippet, async_=True) def test_async_for(self): snippet = """ async for x in y: a """ self.check_stack_size(snippet, async_=True) def test_async_for_else(self): snippet = """ async for x in y: a else: b """ self.check_stack_size(snippet, async_=True) def test_for_break_continue_inside_async_with_block(self): snippet = """ for x in y: async with c: if z: break elif u: continue else: a else: b """ self.check_stack_size(snippet, async_=True) def test_return_inside_async_with_block(self): snippet = """ async with c: if z: return else: a """ self.check_stack_size(snippet, async_=True) if __name__ == "__main__": unittest.main()
examples/wagipython/wagi-python/opt/wasi-python/lib/python3.11/test/test_compile.py
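As a quick, self-contained sketch (not part of the test file; the names here are invented) of two behaviours the suite above exercises: compiling to an AST first with ast.PyCF_ONLY_AST and then finishing compilation from that AST, and CPython's merging of equal constants of the same type within a single compilation unit:

import ast
import dis

# Stop at the AST stage, then compile the AST object itself, as
# test_compile_ast does.
tree = compile("x = 1 + 2", "<demo>", "exec", ast.PyCF_ONLY_AST)
code = compile(tree, "<demo>", "exec")
dis.dis(code)

# Equal, same-typed constants are merged within one compilation unit
# (a CPython implementation detail checked by test_merge_constants),
# so the two lambdas below may share a single code object.
ns = {}
exec("f1, f2 = (lambda: 'abc'), (lambda: 'abc')", ns)
print(ns["f1"].__code__ is ns["f2"].__code__)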
# Python import pytest from unittest import mock from contextlib import contextmanager from awx.main.models import Credential, UnifiedJob from awx.main.tests.factories import ( create_organization, create_job_template, create_instance, create_instance_group, create_notification_template, create_survey_spec, create_workflow_job_template, ) from django.core.cache import cache def pytest_addoption(parser): parser.addoption("--genschema", action="store_true", default=False, help="execute schema validator") def pytest_configure(config): import sys sys._called_from_test = True def pytest_unconfigure(config): import sys del sys._called_from_test @pytest.fixture def mock_access(): @contextmanager def access_given_class(TowerClass): try: mock_instance = mock.MagicMock(__name__='foobar') MockAccess = mock.MagicMock(return_value=mock_instance) the_patch = mock.patch.dict('awx.main.access.access_registry', {TowerClass: MockAccess}, clear=False) the_patch.__enter__() yield mock_instance finally: the_patch.__exit__() return access_given_class @pytest.fixture def job_template_factory(): return create_job_template @pytest.fixture def organization_factory(): return create_organization @pytest.fixture def notification_template_factory(): return create_notification_template @pytest.fixture def survey_spec_factory(): return create_survey_spec @pytest.fixture def instance_factory(): return create_instance @pytest.fixture def instance_group_factory(): return create_instance_group @pytest.fixture def default_instance_group(instance_factory, instance_group_factory): return create_instance_group("default", instances=[create_instance("hostA")]) @pytest.fixture def controlplane_instance_group(instance_factory, instance_group_factory): return create_instance_group("controlplane", instances=[create_instance("hostA")]) @pytest.fixture def job_template_with_survey_passwords_factory(job_template_factory): def rf(persisted): "Returns job with linked JT survey with password survey questions" objects = job_template_factory( 'jt', organization='org1', survey=[ {'variable': 'submitter_email', 'type': 'text', 'default': '[email protected]'}, {'variable': 'secret_key', 'default': '6kQngg3h8lgiSTvIEb21', 'type': 'password'}, {'variable': 'SSN', 'type': 'password'}, ], persisted=persisted, ) return objects.job_template return rf @pytest.fixture def job_with_secret_key_unit(job_with_secret_key_factory): return job_with_secret_key_factory(persisted=False) @pytest.fixture def workflow_job_template_factory(): return create_workflow_job_template @pytest.fixture def job_template_with_survey_passwords_unit(job_template_with_survey_passwords_factory): return job_template_with_survey_passwords_factory(persisted=False) @pytest.fixture def mock_cache(): class MockCache(object): cache = {} def get(self, key, default=None): return self.cache.get(key, default) def set(self, key, value, timeout=60): self.cache[key] = value def delete(self, key): del self.cache[key] return MockCache() def pytest_runtest_teardown(item, nextitem): # clear Django cache at the end of every test ran # NOTE: this should not be memcache (as it is deprecated), nor should it be redis. # This is a local test cache, so we want every test to start with an empty cache cache.clear() @pytest.fixture(scope='session', autouse=True) def mock_external_credential_input_sources(): # Credential objects query their related input sources on initialization. # We mock that behavior out of credentials by default unless we need to # test it explicitly. 
with mock.patch.object(Credential, 'dynamic_input_fields', new=[]) as _fixture: yield _fixture @pytest.fixture(scope='session', autouse=True) def mock_has_unpartitioned_events(): # has_unpartitioned_events determines if there are any events still # left in the old, unpartitioned job events table. In order to work, # this method looks up when the partition migration occurred. When # Django's unit tests run, however, there will be no record of the migration. # We mock this out to circumvent the migration query. with mock.patch.object(UnifiedJob, 'has_unpartitioned_events', new=False) as _fixture: yield _fixture @pytest.fixture(scope='session', autouse=True) def mock_get_event_queryset_no_job_created(): """ SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the job_created field. That field does not actually exist in a non-partition scenario. """ def event_qs(self): kwargs = {self.event_parent_key: self.id} return self.event_class.objects.filter(**kwargs) with mock.patch.object(UnifiedJob, 'get_event_queryset', lambda self: event_qs(self)) as _fixture: yield _fixture
awx/main/tests/conftest.py
5,287
SQLite friendly since partitions aren't supported. Do not add the faked job_created field to the filter. If we do, it will result in an sql query for the job_created field. That field does not actually exist in a non-partition scenario. Returns job with linked JT survey with password survey questions Python clear Django cache at the end of every test ran NOTE: this should not be memcache (as it is deprecated), nor should it be redis. This is a local test cache, so we want every test to start with an empty cache Credential objects query their related input sources on initialization. We mock that behavior out of credentials by default unless we need to test it explicitly. has_unpartitioned_events determines if there are any events still left in the old, unpartitioned job events table. In order to work, this method looks up when the partition migration occurred. When Django's unit tests run, however, there will be no record of the migration. We mock this out to circumvent the migration query.
1,006
en
0.890625
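The conftest above mostly wires factories and mocks into pytest fixtures. A self-contained sketch of the same pattern, using a cache stub in the style of the MockCache shown there (the fixture and test below are illustrative, not taken from the AWX test suite):

import pytest

class MockCache:
    """A tiny in-memory stand-in for a cache backend, for tests only."""
    def __init__(self):
        self.cache = {}
    def get(self, key, default=None):
        return self.cache.get(key, default)
    def set(self, key, value, timeout=60):
        self.cache[key] = value
    def delete(self, key):
        del self.cache[key]

@pytest.fixture
def mock_cache():
    return MockCache()

def test_cache_roundtrip(mock_cache):
    mock_cache.set("greeting", "hello")
    assert mock_cache.get("greeting") == "hello"
    mock_cache.delete("greeting")
    assert mock_cache.get("greeting") is None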
#!/usr/bin/env python3 import sys import os import re import argparse import requests from bs4 import BeautifulSoup as bs version=1.1 print("""\033[1;36m ╦ ╦╔═╗╔╗ ╦═╗╔═╗╔═╗╔╦╗╔═╗╦═╗ ║║║║╣ ╠╩╗ ╠╦╝║╣ ╠═╣║║║║╣ ╠╦╝ ╚╩╝╚═╝╚═╝────╩╚═╚═╝╩ ╩╩ ╩╚═╝╩╚═ 🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥 --> Coded by FEBIN 🛡️🌐 \033[1;39m""") def febrev_fuzz(url): import requests os.system("clear") feblist=open("admin-panel.txt","r+") text=str(feblist.read()) adminpages=list(text.split()) feblist.close() print(f""" [\033[1;37m+\033[1;39m] STARTED CRAWLING TO FIND ADMIN PANEL OF URL : \033[1;34m{url} """) try: if url.startswith("https://") or url.startswith("http://"): url=url else: print("Error : INVALID URL ! URL must start with 'http://' or 'https://'") exit() if url.endswith("/"): url=url server=requests.get(url).headers.get('Server') print(f"\033[1;37m SERVER Type >> {server}") print("\n<----------------------------------------------------------------------------------->") print(" ") else: url=f"{url}/" server=requests.get(url).headers.get('Server') print(f"\033[1;37mSERVER Type >> {server}") print("\n<----------------------------------------------------------------------------------->") print(" ") for i in range(len(adminpages)): reqresp=requests.get(f"{url}{adminpages[i]}",timeout=10) if reqresp.status_code == 200: print(f"\033[1;39m FOUND ==> {url}{adminpages[i]} \033[1;34m") elif reqresp.status_code == 302: print(f"\033[1;39m FOUND 302 ==> {url}{adminpages[i]} \033[1;34m") else: pass except requests.exceptions.ConnectionError: print("[\033[1;31m-\033[1;39m] Connection to the Server Failed, May be invalid URL or bad Internet connection. Check Your Internet connection,URL and try again\n ") except requests.exceptions.ReadTimeout: print("\033[1;31m [\033[1;31m-\033[1;39m] Error : EXECUTION STOPPED DUE TO !TIMED OUT! ERROR, YOUR INTERNET MAY BE DISCONNECTED!!!....EXITTED") print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n") def sub_brute(domain,sublist): if os.path.isfile(sublist): print(f"[\033[1;37m+\033[1;39m] Subdomain wordlist {sublist} loaded -> OK") print("") pass else: print(f"[\033[1;31m-\033[1;39m] Wordlist {sublist} not found!!") exit() sub=open(sublist,"r+") subs=sub.read().split("\n") sub.close() for host in subs: try: req=requests.get(f"http://{host}.{domain}") print(f"\033[1;39m{host}.{domain} --> \033[1;37m{req.status_code}") except requests.exceptions.ConnectionError: pass except UnicodeError: pass print("") print("[\033[1;37m+\033[1;39m] Finished!") print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! 
\033[1;34m \n") def wordlistgen(url,filepath): import requests from bs4 import BeautifulSoup print("") try: webpage=requests.get(url) pagedata=webpage.text soup=BeautifulSoup(pagedata,"html.parser") except requests.exceptions.ConnectionError: print("\033[1;31m[-] ERROR CONNECTING THE SERVER...") exit() for script in soup(["script","style"]): script.extract() text1=soup.get_text() text=str(text1.strip()) feb=text.split() iscount=feb.count('is') wascount=feb.count('was') arecount=feb.count('are') forcount=feb.count('for') thecount=feb.count('the') ofcount=feb.count('of') tocount=feb.count('to') try: isinit=0 while isinit<=iscount: feb.remove('is') isinit=isinit+1 wasinit=0 while wasinit<=wascount: feb.remove('was') wasinit=wasinit+1 areinit=0 while areinit<=arecount: feb.remove('are') areinit=areinit+1 forinit=0 while forinit<=forcount: feb.remove('for') forinit=forinit+1 theinit=0 while theinit<=thecount: feb.remove('the') theinit=theinit+1 ofinit=0 while ofinit<=ofcount: feb.remove('of') ofinit=ofinit+1 toinit=0 while toinit<=tocount: feb.remove('to') toinit=toinit+1 except ValueError: pass feb.sort() for string in feb: count=feb.count(string) strinit=0 while strinit < count: feb.remove(string) strinit=strinit+1 feb.sort() for i in range(len(feb)): try: file=open(filepath,"a+") file.write("\n"+feb[i]) file.close() except FileNotFoundError: homedir=os.environ.get('HOME') file=open(f"{homedir}/fr-wordlist.txt","a+") file.write("\n"+feb[i]) file.close() if os.path.isfile(filepath): print("") print(f"\033[1;39m[\033[1;37m+\033[1;39m]Wordlist {filepath} successfully witten") else: print("\033[1;31m[-]Sorry:Path not Found!! The Path You Specified Doesn't Exist") print("So Saved the wordlist as fr-wordlist.txt in the HOME Directory of the current User.....") print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! 
\033[1;34m \n") def word_analyze(url): import requests from bs4 import BeautifulSoup print("") try: webpage=requests.get(url) pagedata=webpage.text soup=BeautifulSoup(pagedata,"html.parser") except requests.exceptions.ConnectionError: print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...") exit() for script in soup(["script","style"]): script.extract() text1=soup.get_text() text=str(text1.strip()) feb=text.split() iscount=feb.count('is') wascount=feb.count('was') arecount=feb.count('are') forcount=feb.count('for') thecount=feb.count('the') ofcount=feb.count('of') tocount=feb.count('to') try: isinit=0 while isinit<=iscount: feb.remove('is') isinit=isinit+1 wasinit=0 while wasinit<=wascount: feb.remove('was') wasinit=wasinit+1 areinit=0 while areinit<=arecount: feb.remove('are') areinit=areinit+1 forinit=0 while forinit<=forcount: feb.remove('for') forinit=forinit+1 theinit=0 while theinit<=thecount: feb.remove('the') theinit=theinit+1 ofinit=0 while ofinit<=ofcount: feb.remove('of') ofinit=ofinit+1 toinit=0 while toinit<=tocount: feb.remove('to') toinit=toinit+1 except ValueError: pass feb.sort() print("\033[1;32m-"*74) print("\033[1;32m| Words | count/frequency | Graph | ") print("\033[1;32m-"*74) for string in feb: count=feb.count(string) for i in range(count): feb.remove(string) print(f"\033[1;34m| {string + ' ' * (22 - len(string)) + '| '}{str(count) +' ' * (22 - len(str(count)))}| \033[1;32m{'█' * count} " ) print("\033[1;33m-"*74) def endpoint_harvest(url): print(f"[\033[1;37m+\033[1;39m] Collecting Endpoints / Links from the webpage {url}") from bs4 import BeautifulSoup print("") try: webpage=requests.get(url) pagedata=webpage.text soup=BeautifulSoup(pagedata,"html.parser") except requests.exceptions.ConnectionError: print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...") exit() endpoint_pattern1=re.compile('(?:href=")(.*?)"') endpoint_pattern2=re.compile('(?:src=")(.*?)"') endpoint1=endpoint_pattern1.findall(pagedata) endpoint2=endpoint_pattern2.findall(pagedata) for link in endpoint1: print(link.replace("href=","").replace("'","").replace(">","").replace('"','').replace("</"," ")) for src in endpoint2: print(src.replace("src=","").replace("'","").replace(">","").replace('"','').replace("</"," ")) print("") print("[\033[1;37m+\033[1;39m] Finished!") def param(url): from bs4 import BeautifulSoup print("") try: webpage=requests.get(url) pagedata=webpage.text soup=BeautifulSoup(pagedata,"html.parser") except requests.exceptions.ConnectionError: print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...") exit() params=soup.find_all("input") print("[\033[1;37m+\033[1;39m] Extracting Parameters from the WebPage!\n") for param in params: print(param.get("name")) print("[\033[1;37m+\033[1;39m] Finished!") parser = argparse.ArgumentParser(description='Parse the domain, wordlist etc..') parser.add_argument('-link',dest='link', action='store_true',help='Extract Endpoints from url!') parser.add_argument('-admin',dest='admin', action='store_true',help='Find Admin Panel of the given URL !') parser.add_argument('-sub',dest='sub', action='store_true',help='Subdomain brute force of the given domain !') parser.add_argument('-param',dest='param', action='store_true',help='Find hidden parameters from the given URL !') parser.add_argument('-wordlist',dest='wordlist', action='store_true',help='Create targeted wordlist from the given URL !') parser.add_argument('-analyze',dest='analyze', action='store_true',help='Analyze words and their frequencies from 
the given URL !') parser.add_argument('-u',"--url",dest='url', action='store',help='The URL of the webpage!') parser.add_argument('-d',"--domain",dest='domain', action='store',help='The domain name for sub domain brute-force!') parser.add_argument('-w',"--wordlist",dest='list', action='store',help='Extract Endpoints from url!') parser.add_argument('-o',"--outfile",dest='outfile', action='store',help='Output file to save the generated wordlist!!') parser.add_argument('-v',"--version",dest='version', action='store_true',help='Version / Update Check !') args=parser.parse_args() try: if args.link and args.url: if args.url.startswith("http://") or args.url.startswith("https://"): endpoint_harvest(args.url) else: print("[\033[1;31m-\033[1;39m] Invalid URL !") exit() elif args.admin and args.url: if args.url.startswith("http://") or args.url.startswith("https://"): febrev_fuzz(args.url) else: print("[\033[1;31m-\033[1;39m] Invalid URL !") exit() elif args.sub and args.domain and args.list: if args.domain.startswith("http://") or args.domain.startswith("https://"): print("[\033[1;31m-\033[1;39m] Expected Domain name not URL!") exit() else: sub_brute(args.domain,args.list) elif args.wordlist and args.url and args.outfile: if args.url.startswith("http://") or args.url.startswith("https://"): wordlistgen(args.url,args.outfile) else: print("[\033[1;31m-\033[1;39m] Invalid URL !") exit() elif args.analyze and args.url: if args.url.startswith("http://") or args.url.startswith("https://"): word_analyze(args.url) else: print("[\033[1;31m-\033[1;39m] Invalid URL !") exit() elif args.param and args.url: if args.url.startswith("http://") or args.url.startswith("https://"): param(args.url) else: print("[\033[1;31m-\033[1;39m] Invalid URL !") exit() elif args.version: print(f"CURRENT VERSION : {version}") try: verq=requests.get("http://raw.githubusercontent.com/febinrev/web_reamer/master/version") ver=float(verq.text.split()[0]) if ver > version: print(f"[\033[1;37m+\033[1;39m] New Version {ver} of WEB_REAMER is available : https://github.com/febinrev/web_reamer.git") else: print("[\033[1;37m+\033[1;39m] WEB_REAMER is up-to-date!") except requests.exceptions.ConnectionError: print("[\033[1;31m-\033[1;39m] Error Connecting github !") else: print("""\033[1;33m Usage: \033[1;32m1. Endpoint / Link Extraction: \033[1;39m ./web_reamer.py -link -u http://sample.com/ \033[1;32m 2. Admin Panel fuzzing: \033[1;39m ./web_reamer.py -admin -u http://sample.com/ \033[1;32m 3. Subdomain Brute Force: \033[1;39m ./web_reamer.py -sub -d sample.com -w subdomains.txt \033[1;32m 4. Find hidden parameters from webpage: \033[1;39m ./web_reamer.py -param -u http://sample.com/ \033[1;32m 5. Create Targetted Wordlist from webpage: \033[1;39m ./web_reamer.py -wordlist -u http://sample.com/ -o outfile_wordlist.txt \033[1;32m 6. Analyze Word frequencies from the WebPage : \033[1;39m ./web_reamer.py -analyze -u http://sample.com/ \033[1;32m 7. Help : \033[1;39m ./web_reamer.py -h \033[1;32m \033[1;39m ./web_reamer.py --help \033[1;32m 8. Version / Update Check : \033[1;39m ./web_reamer.py -v \033[1;32m \033[1;39m ./web_reamer.py --version \033[1;32m """) except KeyboardInterrupt: print("\n\033[1;39m[\033[1;31m-\033[1;39m] User Interruption! Exit!") exit()
web_reamer.py
12,209
!/usr/bin/env python3
21
fr
0.448822
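The -link mode above extracts endpoints purely with regular expressions over the raw page source. The same idea in isolation, run against an in-memory HTML string so no network access is involved (the sample markup is made up for illustration):

import re

html = '<a href="/login">Login</a><img src="/static/logo.png">'

# Same patterns as endpoint_harvest() above.
href_pattern = re.compile(r'(?:href=")(.*?)"')
src_pattern = re.compile(r'(?:src=")(.*?)"')

print(href_pattern.findall(html))  # ['/login']
print(src_pattern.findall(html))   # ['/static/logo.png']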
from prettytable import PrettyTable from collections import OrderedDict def _fieldnames(rows): def g(): for row in rows: yield from row d = OrderedDict((k, None) for k in g()) return list(d.keys()) def _echo_table(rows): if not rows: return fieldnames = _fieldnames(rows) table = PrettyTable(fieldnames) table.align = 'l' for row in rows: table.add_row([row[k] or '' for k in fieldnames]) click.echo(table.get_string()) def _echo_row(row): if not row: return table = PrettyTable(row.keys()) table.align = 'l' table.add_row(row.values()) click.echo(table.get_string()) def _echo_item(x): if not x: return click.echo(x) import os import logging import click import click_log from . import config _logger = logging.getLogger(__name__) click_log.basic_config(_logger) @click.group() def cli(): pass from . import blackboard @cli.group(name='blackboard') def cli_blackboard(): pass @cli_blackboard.command(name='download', help='Download') @click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).') @click.option('--netid', default=None, help='Use this NetID.') @click.argument('link_text', type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_blackboard_download(netid, get_password, link_text): if netid is None: netid = config.NETID if get_password is None: get_password = config.get_password x = blackboard.download(netid=netid, get_password=get_password, link_text=link_text) _echo_item(x) @cli_blackboard.command(name='upload', help='Upload') @click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).') @click.option('--netid', default=None, help='Use this NetID.') @click.argument('link_text', type=click.STRING) @click.argument('path', type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_blackboard_upload(netid, get_password, link_text, path): if netid is None: netid = config.NETID if get_password is None: get_password = config.get_password blackboard.upload(netid=netid, get_password=get_password, link_text=link_text, path=path) @cli_blackboard.command(name='webassign', help='WebAssign') @click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).') @click.option('--netid', default=None, help='Use this NetID.') @click.argument('link_text', type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_blackboard_webassign(netid, get_password, link_text): if netid is None: netid = config.NETID if get_password is None: get_password = config.get_password blackboard.webassign(netid=netid, get_password=get_password, link_text=link_text) @cli_blackboard.command(name='combo', help='Combine the other commands') @click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).') @click.option('--netid', default=None, help='Use this NetID.') @click.option('--upload', type=click.Path(exists=True), default=None, help="CSV to upload.") @click.option('--webassign/--no-webassign', default=False, help="Export/import WebAssign.") @click.option('--download/--no-download', default=True, help="Download CSV.") @click.argument('link_text', type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_blackboard_webassign(netid, get_password, link_text, upload, webassign, download): if netid is None: netid = config.NETID if get_password is None: get_password = config.get_password if not (upload is None): blackboard.upload(netid=netid, get_password=get_password, link_text=link_text, path=upload) 
if webassign: blackboard.webassign(netid=netid, get_password=get_password, link_text=link_text) if download: x = blackboard.download(netid=netid, get_password=get_password, link_text=link_text) _echo_item(x) from . import ldap @cli.group(name='ldap') def cli_ldap(): pass @cli_ldap.command(name='filter', help='LDAP search with user-specified filter.') @click.argument('filter', type=click.STRING) @click.argument('keys', nargs=-1, type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_ldap_filter(filter, keys): rows = list(ldap.filter(filter, list(keys))) _echo_table(rows) @cli_ldap.command(name='search', help='Perform an LDAP search with filter: .' + ldap.SEARCH_FILTER) @click.argument('term', type=click.STRING) @click.argument('keys', nargs=-1, type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_ldap_search(term, keys): rows = list(ldap.search(term, list(keys))) _echo_table(rows) @cli_ldap.command(name='netid', help='Filter by NetID') @click.argument('netid', type=click.STRING) @click.argument('keys', nargs=-1, type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_ldap_netid(netid, keys): row = ldap.netid(netid, list(keys)) _echo_row(row) @cli_ldap.command(name='alias', help='Filter by alias/PEA') @click.argument('alias', type=click.STRING) @click.argument('keys', nargs=-1, type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_ldap_alias(alias, keys): row = ldap.alias(alias, list(keys)) _echo_row(row) @cli_ldap.command(name='netid-to-alias', help='NetID -> alias/PEA') @click.argument('netid', type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_ldap_netid_to_alias(netid): x = ldap.netid_to_alias(netid) _echo_item(x) @cli_ldap.command(name='alias-to-netid', help='alias -> NetID') @click.argument('alias', type=click.STRING) @click_log.simple_verbosity_option(_logger) def cli_ldap_alias_to_netid(alias): x = ldap.alias_to_netid(alias) _echo_item(x) import os import shutil from . 
import coursebook @cli.group(name='coursebook') def cli_coursebook(): pass @cli_coursebook.group(name='db') def cli_coursebook_db(): pass @cli_coursebook_db.command(name='update') def cli_coursebook_db_update(): coursebook.db_update() @cli_coursebook_db.command(name='netid-to-address') @click.argument('netid', type=click.STRING) def cli_coursebook_db_netid_to_address(netid): X = list(coursebook.db_netid_to_address(netid)) _echo_item(' '.join(X)) @cli_coursebook.group(name='roster') def cli_coursebook_roster(): pass @cli_coursebook_roster.command(name='xlsx-to-csv', help='Convert a CourseBook roster XLSX to CSV.') @click.option('--force/--no-force', default=False, help="Overwrite existing file.") @click.argument('source', type=click.Path(exists=True)) @click.argument('target', type=click.Path()) def cli_coursebook_xlsx_to_csv(force, source, target): if os.path.exists(target) and not force: raise click.ClickException('File exists, maybe use --force?: ' + target) coursebook.roster_xlsx_to_csv(source, target) @cli_coursebook_roster.group(name='download') def cli_coursebook_roster_download(): pass @cli_coursebook_roster.command(name='download', help='Download a CourseBook roster.') @click.option('--force/--no-force', default=False, help="Overwrite existing file.") @click.option('--new/--no-new', default=False, help="Get a new file (don't use the cache).") @click.option('--get-password', default=None, help='Command to evaluate to get password (default is to ask).') @click.option('--netid', default=None, help='Use this NetID.') @click.argument('address', nargs=-1, type=click.STRING) def cli_coursebook_roster_download(netid, get_password, new, force, address): def _split(x): y, f = os.path.splitext(x) return y, f[1:] for x in address: _, f = _split(x) if not (f in coursebook.ROSTER_FORMAT): raise click.ClickException("{x}: I don't know how to download a `{f}`, only: {these}.".format(x=x, f=f, these=' '.join(coursebook.ROSTER_FORMAT))) # FIXME: check for proper address format if os.path.exists(x) and not force: raise click.ClickException('File exists, maybe use --force?: ' + x) if netid is None: netid = config.NETID if get_password is None: get_password = config.get_password if netid is None: raise click.ClickException('You must either specify a NetID in {config} or with --netid.'.format(config.CONFIG_FILE)) for x in address: y, f = _split(x) z = coursebook.roster_download(netid=netid, get_password=get_password, address=y, format=f, new=new) shutil.copyfile(z, x)
utd/script.py
8,527
FIXME: check for proper address format
38
en
0.586758
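The CLI above is a set of nested click groups whose commands render rows through PrettyTable. A stripped-down sketch of that group-plus-table pattern, independent of the utd package internals (command and field names are illustrative):

import click
from prettytable import PrettyTable

@click.group()
def cli():
    pass

@cli.command(name="show")
@click.argument("name", type=click.STRING)
def cli_show(name):
    # Render a single key/value row the same way _echo_row() does above.
    table = PrettyTable(["key", "value"])
    table.align = "l"
    table.add_row(["name", name])
    click.echo(table.get_string())

if __name__ == "__main__":
    cli()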
""" Argparse utilities""" import sys from six import PY2 from argparse import ArgumentParser try: from argparse import _SubParsersAction except ImportError: _SubParsersAction = type(None) class PatchArgumentParser: _original_parse_args = None _original_parse_known_args = None _original_add_subparsers = None _add_subparsers_counter = 0 _current_task = None _calling_current_task = False _last_parsed_args = None _last_arg_parser = None @staticmethod def add_subparsers(self, **kwargs): if 'dest' not in kwargs: if kwargs.get('title'): kwargs['dest'] = '/' + kwargs['title'] else: PatchArgumentParser._add_subparsers_counter += 1 kwargs['dest'] = '/subparser%d' % PatchArgumentParser._add_subparsers_counter return PatchArgumentParser._original_add_subparsers(self, **kwargs) @staticmethod def parse_args(self, args=None, namespace=None): return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_args, self, args=args, namespace=namespace) @staticmethod def parse_known_args(self, args=None, namespace=None): return PatchArgumentParser._patched_parse_args(PatchArgumentParser._original_parse_known_args, self, args=args, namespace=namespace) @staticmethod def _patched_parse_args(original_parse_fn, self, args=None, namespace=None): current_task = PatchArgumentParser._current_task # if we are running remotely, we always have a task id, so we better patch the argparser as soon as possible. if not current_task: from ..config import running_remotely, get_remote_task_id if running_remotely(): # this will cause the current_task() to set PatchArgumentParser._current_task from trains import Task # noinspection PyBroadException try: current_task = Task.get_task(task_id=get_remote_task_id()) except Exception: pass # automatically connect to current task: if current_task: from ..config import running_remotely if PatchArgumentParser._calling_current_task: # if we are here and running remotely by now we should try to parse the arguments if original_parse_fn: PatchArgumentParser._add_last_parsed_args(original_parse_fn(self, args=args, namespace=namespace)) return PatchArgumentParser._last_parsed_args[-1] PatchArgumentParser._calling_current_task = True # Store last instance and result PatchArgumentParser._add_last_arg_parser(self) parsed_args = None # parse if we are running in dev mode if not running_remotely() and original_parse_fn: parsed_args = original_parse_fn(self, args=args, namespace=namespace) PatchArgumentParser._add_last_parsed_args(parsed_args) # noinspection PyBroadException try: # sync to/from task # noinspection PyProtectedMember current_task._connect_argparse( self, args=args, namespace=namespace, parsed_args=parsed_args[0] if isinstance(parsed_args, tuple) else parsed_args ) except Exception: pass # sync back and parse if running_remotely() and original_parse_fn: # if we are running python2 check if we have subparsers, # if we do we need to patch the args, because there is no default subparser if PY2: import itertools def _get_sub_parsers_defaults(subparser, prev=[]): actions_grp = [a._actions for a in subparser.choices.values()] if isinstance( subparser, _SubParsersAction) else [subparser._actions] sub_parsers_defaults = [[subparser]] if hasattr( subparser, 'default') and subparser.default else [] for actions in actions_grp: sub_parsers_defaults += [_get_sub_parsers_defaults(a, prev) for a in actions if isinstance(a, _SubParsersAction) and hasattr(a, 'default') and a.default] return list(itertools.chain.from_iterable(sub_parsers_defaults)) sub_parsers_defaults = 
_get_sub_parsers_defaults(self) if sub_parsers_defaults: if args is None: # args default to the system args import sys as _sys args = _sys.argv[1:] else: args = list(args) # make sure we append the subparsers for a in sub_parsers_defaults: if a.default not in args: args.append(a.default) PatchArgumentParser._add_last_parsed_args(original_parse_fn(self, args=args, namespace=namespace)) else: PatchArgumentParser._add_last_parsed_args(parsed_args or {}) PatchArgumentParser._calling_current_task = False return PatchArgumentParser._last_parsed_args[-1] # Store last instance and result PatchArgumentParser._add_last_arg_parser(self) PatchArgumentParser._add_last_parsed_args( {} if not original_parse_fn else original_parse_fn(self, args=args, namespace=namespace)) return PatchArgumentParser._last_parsed_args[-1] @staticmethod def _add_last_parsed_args(parsed_args): PatchArgumentParser._last_parsed_args = (PatchArgumentParser._last_parsed_args or []) + [parsed_args] @staticmethod def _add_last_arg_parser(a_argparser): PatchArgumentParser._last_arg_parser = (PatchArgumentParser._last_arg_parser or []) + [a_argparser] def patch_argparse(): # make sure we only patch once if not sys.modules.get('argparse') or hasattr(sys.modules['argparse'].ArgumentParser, '_parse_args_patched'): return # mark patched argparse sys.modules['argparse'].ArgumentParser._parse_args_patched = True # patch argparser PatchArgumentParser._original_parse_args = sys.modules['argparse'].ArgumentParser.parse_args PatchArgumentParser._original_parse_known_args = sys.modules['argparse'].ArgumentParser.parse_known_args PatchArgumentParser._original_add_subparsers = sys.modules['argparse'].ArgumentParser.add_subparsers sys.modules['argparse'].ArgumentParser.parse_args = PatchArgumentParser.parse_args sys.modules['argparse'].ArgumentParser.parse_known_args = PatchArgumentParser.parse_known_args sys.modules['argparse'].ArgumentParser.add_subparsers = PatchArgumentParser.add_subparsers # Notice! we are patching argparser, sop we know if someone parsed arguments before connecting to task patch_argparse() def call_original_argparser(self, args=None, namespace=None): if PatchArgumentParser._original_parse_args: return PatchArgumentParser._original_parse_args(self, args=args, namespace=namespace) def argparser_parseargs_called(): return PatchArgumentParser._last_arg_parser is not None def argparser_update_currenttask(task): PatchArgumentParser._current_task = task def get_argparser_last_args(): if not PatchArgumentParser._last_arg_parser or not PatchArgumentParser._last_parsed_args: return [] return [(parser, args[0] if isinstance(args, tuple) else args) for parser, args in zip(PatchArgumentParser._last_arg_parser, PatchArgumentParser._last_parsed_args)] def add_params_to_parser(parser, params): assert isinstance(parser, ArgumentParser) assert isinstance(params, dict) def get_type_details(v): for t in (int, float, str): try: value = t(v) return t, value except ValueError: continue # AJB temporary protection from ui problems sending empty dicts params.pop('', None) for param, value in params.items(): type, type_value = get_type_details(value) parser.add_argument('--%s' % param, type=type, default=type_value) return parser
trains/utilities/args.py
8,697
Argparse utilities if we are running remotely, we always have a task id, so we better patch the argparser as soon as possible. this will cause the current_task() to set PatchArgumentParser._current_task noinspection PyBroadException automatically connect to current task: if we are here and running remotely by now we should try to parse the arguments Store last instance and result parse if we are running in dev mode noinspection PyBroadException sync to/from task noinspection PyProtectedMember sync back and parse if we are running python2 check if we have subparsers, if we do we need to patch the args, because there is no default subparser args default to the system args make sure we append the subparsers Store last instance and result make sure we only patch once mark patched argparse patch argparser Notice! we are patching argparser, sop we know if someone parsed arguments before connecting to task AJB temporary protection from ui problems sending empty dicts
976
en
0.711236
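The module above works by monkey-patching argparse.ArgumentParser.parse_args so that every parsed namespace can be recorded and synced to the current task. A heavily reduced sketch of just the patch-and-record idea, with no task integration (standard library only):

import argparse

_original_parse_args = argparse.ArgumentParser.parse_args
_recorded = []

def _recording_parse_args(self, args=None, namespace=None):
    # Call the original implementation, then remember the result.
    parsed = _original_parse_args(self, args=args, namespace=namespace)
    _recorded.append(parsed)
    return parsed

argparse.ArgumentParser.parse_args = _recording_parse_args

parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=0.01)
parser.parse_args(["--lr", "0.1"])
print(_recorded)  # [Namespace(lr=0.1)]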
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Apr, 2019 @author: Nathan de Lara <[email protected]> """ from typing import Optional, Union import numpy as np from sknetwork.utils.check import check_seeds def stack_seeds(n_row: int, n_col: int, seeds_row: Optional[Union[np.ndarray, dict]], seeds_col: Optional[Union[np.ndarray, dict]] = None, default_value: float = -1) -> np.ndarray: """Process seeds for rows and columns and stack the results into a single vector.""" if seeds_row is None and seeds_col is None: seeds_row = np.ones(n_row) seeds_col = default_value * np.ones(n_col) elif seeds_row is None: seeds_row = default_value * np.ones(n_row) elif seeds_col is None: seeds_col = default_value * np.ones(n_col) seeds_row = check_seeds(seeds_row, n_row) seeds_col = check_seeds(seeds_col, n_col) return np.hstack((seeds_row, seeds_col)) def seeds2probs(n: int, seeds: Union[dict, np.ndarray] = None) -> np.ndarray: """Transform seeds into probability vector. Parameters ---------- n : int Total number of samples. seeds : If ``None``, the uniform distribution is used. Otherwise, a non-negative, non-zero vector or a dictionary must be provided. Returns ------- probs: np.ndarray A probability vector. """ if seeds is None: return np.ones(n) / n else: seeds = check_seeds(seeds, n) probs = np.zeros_like(seeds, dtype=float) ix = (seeds > 0) probs[ix] = seeds[ix] w: float = probs.sum() if w > 0: return probs / w else: raise ValueError('At least one seeds must have a positive probability.')
sknetwork/utils/seeds.py
1,760
Transform seeds into probability vector. Parameters ---------- n : int Total number of samples. seeds : If ``None``, the uniform distribution is used. Otherwise, a non-negative, non-zero vector or a dictionary must be provided. Returns ------- probs: np.ndarray A probability vector. Process seeds for rows and columns and stack the results into a single vector. Created on Apr, 2019 @author: Nathan de Lara <[email protected]> !/usr/bin/env python3 -*- coding: utf-8 -*-
488
en
0.61039
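seeds2probs above zeroes out non-positive seed values and normalizes the rest into a probability vector, falling back to the uniform distribution when no seeds are given. A pure-NumPy mirror of that logic for illustration (it skips check_seeds and works directly on a dense seed vector):

import numpy as np

def seeds_to_probs(n, seeds=None):
    """Illustrative re-implementation of the normalization in seeds2probs()."""
    if seeds is None:
        return np.ones(n) / n
    seeds = np.asarray(seeds, dtype=float)
    probs = np.zeros_like(seeds)
    positive = seeds > 0
    probs[positive] = seeds[positive]
    total = probs.sum()
    if total == 0:
        raise ValueError("At least one seed must have a positive value.")
    return probs / total

print(seeds_to_probs(4))                         # uniform: [0.25 0.25 0.25 0.25]
print(seeds_to_probs(4, [1.0, 0.0, -1.0, 3.0]))  # [0.25 0.   0.   0.75]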
""" This module provides the unsafe things for targets/numbers.py """ from .. import types from ..extending import intrinsic from llvmlite import ir @intrinsic def viewer(tyctx, val, viewty): """ Bitcast a scalar 'val' to the given type 'viewty'. """ bits = val.bitwidth if isinstance(viewty.dtype, types.Integer): bitcastty = ir.IntType(bits) elif isinstance(viewty.dtype, types.Float): bitcastty = ir.FloatType() if bits == 32 else ir.DoubleType() else: assert 0, "unreachable" def codegen(cgctx, builder, typ, args): flt = args[0] return builder.bitcast(flt, bitcastty) retty = viewty.dtype sig = retty(val, viewty) return sig, codegen @intrinsic def trailing_zeros(typeingctx, src): """Counts trailing zeros in the binary representation of an integer.""" if not isinstance(src, types.Integer): raise TypeError( "trailing_zeros is only defined for integers, but passed value was" " '{}'.".format(src) ) def codegen(context, builder, signature, args): [src] = args return builder.cttz(src, ir.Constant(ir.IntType(1), 0)) return src(src), codegen @intrinsic def leading_zeros(typeingctx, src): """Counts leading zeros in the binary representation of an integer.""" if not isinstance(src, types.Integer): raise TypeError( "leading_zeros is only defined for integers, but passed value was " "'{}'.".format(src) ) def codegen(context, builder, signature, args): [src] = args return builder.ctlz(src, ir.Constant(ir.IntType(1), 0)) return src(src), codegen
numba/unsafe/numbers.py
1,684
Counts leading zeros in the binary representation of an integer. Counts trailing zeros in the binary representation of an integer. Bitcast a scalar 'val' to the given type 'viewty'. This module provides the unsafe things for targets/numbers.py
244
en
0.755063
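These intrinsics are only meaningful inside jitted code. A hedged usage sketch, assuming Numba is installed and that the functions are importable from numba.unsafe.numbers as the file path above suggests:

import numpy as np
from numba import njit
from numba.unsafe.numbers import trailing_zeros, leading_zeros

@njit
def tz(x):
    # Count trailing zero bits of an integer inside nopython-compiled code.
    return trailing_zeros(x)

@njit
def lz(x):
    # Count leading zero bits of an integer inside nopython-compiled code.
    return leading_zeros(x)

print(tz(np.uint32(8)))   # 3, since 8 == 0b1000
print(lz(np.uint32(8)))   # 28 leading zero bits in a 32-bit value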
# -*- coding: utf-8 -*- """ Created on Sat Aug 3 23:07:15 2019 @author: ydima """ import logging import os from pathlib import Path import random import shlex import string from subprocess import PIPE, Popen import tempfile from typing import Dict, List, Optional, Union import pandas as pd from .constants import ( DIRECTIONS, IN, IS_WIN32, NEWLINE, OUT, QUERY, QUERYOUT, SQLCHAR, TABLE, VIEW, BCPandasException, BCPandasValueError, read_data_settings, sql_collation, ) logger = logging.getLogger(__name__) def bcp( sql_item: str, direction: str, flat_file: str, creds, sql_type: str = "table", schema: str = "dbo", format_file_path: str = None, batch_size: int = None, col_delimiter: str = None, row_terminator: str = None, bcp_path: Union[str, Path] = None, error_file_path: str = None ): """ See https://docs.microsoft.com/en-us/sql/tools/bcp-utility """ combos = {TABLE: [IN, OUT], QUERY: [QUERYOUT], VIEW: [IN, OUT]} direc = direction.lower() # validation if direc not in DIRECTIONS: raise BCPandasValueError( f"Param 'direction' must be one of {DIRECTIONS}, you passed {direc}" ) if direc not in combos[sql_type]: raise BCPandasValueError( f"Wrong combo of direction and SQL object, you passed {sql_type} and {direc} ." ) # auth if creds.with_krb_auth: auth = ["-T"] else: auth = ["-U", creds.username, "-P", creds.password] # prepare SQL item string if sql_type == QUERY: # remove newlines for queries, otherwise messes up BCP sql_item_string = quote_this("".join(sql_item.splitlines())) else: sql_item_string = f"{schema}.{sql_item}" # construct BCP command bcp_command = [ "bcp" if bcp_path is None else quote_this(str(bcp_path)), sql_item_string, direc, flat_file, "-S", creds.server, "-d", creds.database, "-q", # Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW "-e", error_file_path ] + auth if batch_size: bcp_command += ["-b", str(batch_size)] # formats if direc == IN: bcp_command += ["-f", format_file_path] elif direc in (OUT, QUERYOUT): bcp_command += [ "-c", # marking as character data, not Unicode (maybe make as param?) quote_this( f"-t{read_data_settings['delimiter'] if col_delimiter is None else col_delimiter}" ), quote_this( f"-r{read_data_settings['newline'] if row_terminator is None else row_terminator}" ), ] # execute bcp_command_log = [c if c != creds.password else "[REDACTED]" for c in bcp_command] logger.info(f"Executing BCP command now... \nBCP command is: {bcp_command_log}") ret_code = run_cmd(bcp_command) if ret_code: raise BCPandasException(f"Bcp command failed with exit code {ret_code}") def get_temp_file() -> str: """ Returns full path to a temporary file without creating it. """ tmp_dir = tempfile.gettempdir() file_path = os.path.join( tmp_dir, "".join(random.choices(string.ascii_letters + string.digits, k=21)) ) return file_path def _escape(input_string: str) -> str: """ Adopted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25 """ return ( input_string.replace('"', '\\"') .replace("'", "\\'") .replace("\r", "\\r") .replace("\n", "\\n") ) def build_format_file( df: pd.DataFrame, delimiter: str, db_cols_order: Optional[Dict[str, int]] = None ) -> str: """ Creates the non-xml SQL format file. Puts 4 spaces between each section. See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server for the specification of the file. 
# TODO add params/options to control: # - the char type (not just SQLCHAR), Parameters ---------- df : pandas DataFrame delimiter : a valid delimiter character db_cols_order : dict, optional Dict of {database column name -> ordinal position of the column}. Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table. 1-indexed, so the first columns is 1, second is 2, etc. Only needed if the order of the columns in the dataframe doesn't match the database. Returns ------- A string containing the format file """ _space = " " * 4 format_file_str = f"9.0\n{len(df.columns)}\n" # Version and Number of columns for col_num, col_name in enumerate(df.columns, start=1): # last col gets a newline sep _delim = delimiter if col_num != len(df.columns) else NEWLINE _line = _space.join( [ str(col_num), # Host file field order SQLCHAR, # Host file data type str(0), # Prefix length str(0), # Host file data length f'"{_escape(_delim)}"', # Terminator (see note below) str( col_num if not db_cols_order else db_cols_order[str(col_name)] ), # Server column order str(col_name), # Server column name, optional as long as not blank sql_collation, # Column collation "\n", ] ) format_file_str += _line # FYI very important to surround the Terminator with quotes, otherwise BCP fails with: # "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug. return format_file_str def quote_this(this: str, skip: bool = False) -> str: """ OS-safe way to quote a string. Returns the string with quotes around it. On Windows ~~it's double quotes~~ we skip quoting, on Linux it's single quotes. """ if isinstance(this, str): if IS_WIN32: return this # TODO maybe change? else: return shlex.quote(this) else: return this def run_cmd(cmd: List[str]) -> int: """ Runs the given command. Prints STDOUT in real time, prints STDERR when command is complete, and logs both STDOUT and STDERR. Paramters --------- cmd : list of str The command to run, to be submitted to `subprocess.Popen()` Returns ------- The exit code of the command """ if IS_WIN32: with_shell = False else: with_shell = True cmd = " ".join(cmd) # type: ignore proc = Popen(cmd, stdout=PIPE, stderr=PIPE, encoding="utf-8", errors="utf-8", shell=with_shell,) # live stream STDOUT while True: outs = proc.stdout.readline() if outs: print(outs, end="") logger.info(outs) if proc.poll() is not None and outs == "": break errs = proc.stderr.readlines() if errs: print(errs, end="") logger.error(errs) return proc.returncode
bcpandas/utils.py
7,204
Adopted from https://github.com/titan550/bcpy/blob/master/bcpy/format_file_builder.py#L25 See https://docs.microsoft.com/en-us/sql/tools/bcp-utility Creates the non-xml SQL format file. Puts 4 spaces between each section. See https://docs.microsoft.com/en-us/sql/relational-databases/import-export/non-xml-format-files-sql-server for the specification of the file. # TODO add params/options to control: # - the char type (not just SQLCHAR), Parameters ---------- df : pandas DataFrame delimiter : a valid delimiter character db_cols_order : dict, optional Dict of {database column name -> ordinal position of the column}. Maps existing columns in the database to their ordinal position, i.e. the order of the columns in the db table. 1-indexed, so the first columns is 1, second is 2, etc. Only needed if the order of the columns in the dataframe doesn't match the database. Returns ------- A string containing the format file Returns full path to a temporary file without creating it. OS-safe way to quote a string. Returns the string with quotes around it. On Windows ~~it's double quotes~~ we skip quoting, on Linux it's single quotes. Runs the given command. Prints STDOUT in real time, prints STDERR when command is complete, and logs both STDOUT and STDERR. Paramters --------- cmd : list of str The command to run, to be submitted to `subprocess.Popen()` Returns ------- The exit code of the command Created on Sat Aug 3 23:07:15 2019 @author: ydima -*- coding: utf-8 -*- validation auth prepare SQL item string remove newlines for queries, otherwise messes up BCP construct BCP command Executes the SET QUOTED_IDENTIFIERS ON statement, needed for Azure SQL DW formats marking as character data, not Unicode (maybe make as param?) execute Version and Number of columns last col gets a newline sep Host file field order Host file data type Prefix length Host file data length Terminator (see note below) Server column order Server column name, optional as long as not blank Column collation FYI very important to surround the Terminator with quotes, otherwise BCP fails with: "Unexpected EOF encountered in BCP data-file". Hugely frustrating bug. TODO maybe change? type: ignore live stream STDOUT
2,240
en
0.719074
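build_format_file above emits the non-XML BCP format file line by line. A hedged usage sketch, assuming bcpandas is installed and that the helper is importable from bcpandas.utils as the path suggests (the DataFrame and delimiter are illustrative):

import pandas as pd
from bcpandas.utils import build_format_file

df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
fmt = build_format_file(df, delimiter=",")
print(fmt)
# First line is the BCP version ("9.0"), the second is the column count ("2"),
# followed by one whitespace-separated spec line per column, as built above.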
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py', '../_base_/swa.py' ] # model settings model = dict( type='ATSS', pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', backbone=dict( type='SwinTransformer', embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., ape=False, drop_path_rate=0.3, patch_norm=True, out_indices=(0, 1, 2, 3), use_checkpoint=True, ), neck=dict( type='PAFPNX', in_channels=[128, 256, 512, 1024], out_channels=256, start_level=1, add_extra_convs='on_output', num_outs=5, relu_before_extra_convs=True, pafpn_conv_cfg=dict(type='DCNv2'), norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), bbox_head=dict( type='DDODHead', num_classes=1, in_channels=256, stacked_convs=4, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], octave_base_scale=8, scales_per_octave=1, strides=[8, 16, 32, 64, 128]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='CIoULoss', loss_weight=2.0), loss_iou=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict(type='ATSSCostAssigner', topk=9), reg_assigner=dict(type='ATSSCostAssigner', topk=9, alpha=0.5), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # data setting dataset_type = 'CocoDataset' data_root = '/content/data/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) albu_train_transforms = [ dict(type='ShiftScaleRotate', shift_limit=0.0625, scale_limit=0.0, rotate_limit=0, interpolation=1, p=0.5), dict(type='RandomBrightnessContrast', brightness_limit=[0.1, 0.3], contrast_limit=[0.1, 0.3], p=0.2), dict( type='OneOf', transforms=[ dict( type='RGBShift', r_shift_limit=10, g_shift_limit=10, b_shift_limit=10, p=1.0), dict( type='HueSaturationValue', hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=1.0) ], p=0.1), dict(type='ImageCompression', quality_lower=85, quality_upper=95, p=0.2), dict(type='ChannelShuffle', p=0.1), dict( type='OneOf', transforms=[ dict(type='Blur', blur_limit=3, p=1.0), dict(type='MedianBlur', blur_limit=3, p=1.0) ], p=0.1), ] train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='RandomCrop', crop_type='relative_range', crop_size=(0.9, 0.9), allow_negative_crop = False), dict( type='Resize', img_scale=[(720, 720), (960, 960)], multiscale_mode='range', keep_ratio=True), dict( type='CutOut', n_holes=(5, 10), cutout_shape=[(4, 4), (4, 8), (8, 4), (8, 8), (16, 8), (8, 16), (16, 16), (16, 32), (32, 16), (32, 32), (32, 48), (48, 32), (48, 48)]), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Pad', size_divisor=32), dict( type='Albu', transforms=albu_train_transforms, bbox_params=dict( type='BboxParams', format='pascal_voc', label_fields=['gt_labels'], min_visibility=0.0, filter_lost_elements=True), keymap={ 'img': 'image', 'gt_bboxes': 'bboxes' }, update_pad_shape=False, skip_img_without_anno=True), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), 
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(800, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=12, workers_per_gpu=4, train=dict(type = dataset_type, ann_file = data_root + '/annotations/instances_train2017.json', img_prefix = 'train_images/', pipeline=train_pipeline), val=dict(type = dataset_type, ann_file = data_root + '/annotations/instances_val2017.json', img_prefix = 'val_images/', pipeline=test_pipeline, samples_per_gpu = 24), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas = (0.9, 0.999), weight_decay=0.05) optimizer_config = dict(grad_clip = None) log_config = dict(interval = 10) # learning policy lr_config = dict( policy='CosineAnnealing', min_lr_ratio = 0.2, warmup='linear', warmup_iters=500, warmup_ratio=0.1, ) runner = dict(type='IterBasedRunner', max_iters=3000, max_epochs = None) checkpoint_config = dict(interval = 100) evaluation = dict(interval = 100, metric = 'bbox') fp16 = dict(loss_scale=512.) # runtime load_from = '/gdrive/My Drive/checkpoints/bvr_atss_x101_dcn_fpn_2x_coco.pth' resume_from = None workflow = [('train', 1)]
configs/ddod/swin.py
5,343
model settings training and testing settings data setting optimizer learning policy runtime
91
en
0.83651
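The file above is an MMDetection-style config. A hedged sketch of how such a config is typically loaded and turned into a detector, assuming an MMDetection 2.x / mmcv environment in which the custom components it references (DDODHead, PAFPNX, ATSSCostAssigner, the SWA hook) are registered; the path is illustrative:

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile("configs/ddod/swin.py")
# train_cfg/test_cfg already live inside cfg.model in this config.
model = build_detector(cfg.model)
print(type(model).__name__)  # expected to be "ATSS"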
# Microwave oven def get_E_Elc_microwave_d_t(P_Elc_microwave_cook_rtd, t_microwave_cook_d_t): """Calculate the hourly electricity consumption Parameters ---------- P_Elc_microwave_cook_rtd : float Rated standby power during cooking, W t_microwave_cook_d_t : ndarray(N-dimensional array) ND array holding the cooking time for every hour of the year, h The cooking time at hour t of day d is stored as 8760 consecutive values from the start of the year Returns ---------- E_Elc_microwave_d_t : ndarray(N-dimensional array) ND array holding the electricity consumption for every hour of the year, Wh The electricity consumption at hour t of day d is stored as 8760 consecutive values from the start of the year """ P_Elc_microwave_cook = get_P_Elc_microwave_cook(P_Elc_microwave_cook_rtd) E_Elc_microwave_d_t = P_Elc_microwave_cook * t_microwave_cook_d_t E_Elc_microwave_d_t = E_Elc_microwave_d_t * 10**(-3) return E_Elc_microwave_d_t def get_P_Elc_microwave_cook(P_Elc_microwave_rtd): """Calculate the power consumption during cooking Parameters ---------- P_Elc_microwave_cook_rtd : float Rated standby power during cooking, W Returns ---------- P_Elc_microwave_cook : float Power consumption during cooking, W """ P_Elc_microwave_cook = 0.9373 * P_Elc_microwave_rtd return P_Elc_microwave_cook
src/pyhees/section10_j1_f.py
1,435
Calculate the hourly electricity consumption Parameters ---------- P_Elc_microwave_cook_rtd : float Rated standby power during cooking, W t_microwave_cook_d_t : ndarray(N-dimensional array) ND array holding the cooking time for every hour of the year, h The cooking time at hour t of day d is stored as 8760 consecutive values from the start of the year Returns ---------- E_Elc_microwave_d_t : ndarray(N-dimensional array) ND array holding the electricity consumption for every hour of the year, Wh The electricity consumption at hour t of day d is stored as 8760 consecutive values from the start of the year Calculate the power consumption during cooking Parameters ---------- P_Elc_microwave_cook_rtd : float Rated standby power during cooking, W Returns ---------- P_Elc_microwave_cook : float Power consumption during cooking, W Microwave oven
519
ja
0.935555
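The two functions above reduce to E = 0.9373 * rated power * cooking time, scaled by 10**(-3). A hedged usage sketch with a made-up rated power and a simple daily cooking schedule, assuming the package is importable as pyhees as the src/pyhees path suggests:

import numpy as np
from pyhees.section10_j1_f import get_E_Elc_microwave_d_t

P_rtd = 1450.0               # rated power during cooking, W (illustrative value)
t_cook = np.zeros(24 * 365)  # cooking hours for each hour of the year
t_cook[19::24] = 0.25        # 15 minutes of cooking every day at 19:00

E = get_E_Elc_microwave_d_t(P_rtd, t_cook)
print(E.shape, E.sum())      # (8760,) and the annual total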
from ... import create_engine from ... import exc from ...engine import url as sa_url from ...testing.provision import configure_follower from ...testing.provision import create_db from ...testing.provision import drop_db from ...testing.provision import follower_url_from_main from ...testing.provision import log from ...testing.provision import run_reap_dbs from ...testing.provision import temp_table_keyword_args from ...testing.provision import update_db_opts @create_db.for_db("oracle") def _oracle_create_db(cfg, eng, ident): # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or # similar, so that the default tablespace is not "system"; reflection will # fail otherwise with eng.connect() as conn: conn.execute("create user %s identified by xe" % ident) conn.execute("create user %s_ts1 identified by xe" % ident) conn.execute("create user %s_ts2 identified by xe" % ident) conn.execute("grant dba to %s" % (ident,)) conn.execute("grant unlimited tablespace to %s" % ident) conn.execute("grant unlimited tablespace to %s_ts1" % ident) conn.execute("grant unlimited tablespace to %s_ts2" % ident) @configure_follower.for_db("oracle") def _oracle_configure_follower(config, ident): config.test_schema = "%s_ts1" % ident config.test_schema_2 = "%s_ts2" % ident def _ora_drop_ignore(conn, dbname): try: conn.execute("drop user %s cascade" % dbname) log.info("Reaped db: %s", dbname) return True except exc.DatabaseError as err: log.warning("couldn't drop db: %s", err) return False @drop_db.for_db("oracle") def _oracle_drop_db(cfg, eng, ident): with eng.connect() as conn: # cx_Oracle seems to occasionally leak open connections when a large # suite it run, even if we confirm we have zero references to # connection objects. # while there is a "kill session" command in Oracle, # it unfortunately does not release the connection sufficiently. _ora_drop_ignore(conn, ident) _ora_drop_ignore(conn, "%s_ts1" % ident) _ora_drop_ignore(conn, "%s_ts2" % ident) @update_db_opts.for_db("oracle") def _oracle_update_db_opts(db_url, db_opts): pass @run_reap_dbs.for_db("oracle") def _reap_oracle_dbs(url, idents): log.info("db reaper connecting to %r", url) eng = create_engine(url) with eng.connect() as conn: log.info("identifiers in file: %s", ", ".join(idents)) to_reap = conn.execute( "select u.username from all_users u where username " "like 'TEST_%' and not exists (select username " "from v$session where username=u.username)" ) all_names = {username.lower() for (username,) in to_reap} to_drop = set() for name in all_names: if name.endswith("_ts1") or name.endswith("_ts2"): continue elif name in idents: to_drop.add(name) if "%s_ts1" % name in all_names: to_drop.add("%s_ts1" % name) if "%s_ts2" % name in all_names: to_drop.add("%s_ts2" % name) dropped = total = 0 for total, username in enumerate(to_drop, 1): if _ora_drop_ignore(conn, username): dropped += 1 log.info( "Dropped %d out of %d stale databases detected", dropped, total ) @follower_url_from_main.for_db("oracle") def _oracle_follower_url_from_main(url, ident): url = sa_url.make_url(url) url.username = ident url.password = "xe" return url @temp_table_keyword_args.for_db("oracle") def _oracle_temp_table_keyword_args(cfg, eng): return { "prefixes": ["GLOBAL TEMPORARY"], "oracle_on_commit": "PRESERVE ROWS", }
virtual/lib/python3.7/site-packages/sqlalchemy/dialects/oracle/provision.py
3,862
NOTE: make sure you've run "ALTER DATABASE default tablespace users" or similar, so that the default tablespace is not "system"; reflection will fail otherwise cx_Oracle seems to occasionally leak open connections when a large suite it run, even if we confirm we have zero references to connection objects. while there is a "kill session" command in Oracle, it unfortunately does not release the connection sufficiently.
420
en
0.887421
# -*- coding: utf-8 -*- """ pygments.lexers.other ~~~~~~~~~~~~~~~~~~~~~ Lexers for other languages. :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \ this, do_insertions from pygments.token import Error, Punctuation, \ Text, Comment, Operator, Keyword, Name, String, Number, Generic from pygments.util import shebang_matches from pygments.lexers.web import HtmlLexer __all__ = ['SqlLexer', 'MySqlLexer', 'SqliteConsoleLexer', 'BrainfuckLexer', 'BashLexer', 'BatchLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer', 'SmalltalkLexer', 'TcshLexer', 'LogtalkLexer', 'GnuplotLexer', 'PovrayLexer', 'AppleScriptLexer', 'BashSessionLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer', 'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer'] line_re = re.compile('.*?\n') class SqlLexer(RegexLexer): """ Lexer for Structured Query Language. Currently, this lexer does not recognize any special syntax except ANSI SQL. """ name = 'SQL' aliases = ['sql'] filenames = ['*.sql'] mimetypes = ['text/x-sql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Text), (r'--.*?\n', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (r'(ABORT|ABS|ABSOLUTE|ACCESS|ADA|ADD|ADMIN|AFTER|AGGREGATE|' r'ALIAS|ALL|ALLOCATE|ALTER|ANALYSE|ANALYZE|AND|ANY|ARE|AS|' r'ASC|ASENSITIVE|ASSERTION|ASSIGNMENT|ASYMMETRIC|AT|ATOMIC|' r'AUTHORIZATION|AVG|BACKWARD|BEFORE|BEGIN|BETWEEN|BITVAR|' r'BIT_LENGTH|BOTH|BREADTH|BY|C|CACHE|CALL|CALLED|CARDINALITY|' r'CASCADE|CASCADED|CASE|CAST|CATALOG|CATALOG_NAME|CHAIN|' r'CHARACTERISTICS|CHARACTER_LENGTH|CHARACTER_SET_CATALOG|' r'CHARACTER_SET_NAME|CHARACTER_SET_SCHEMA|CHAR_LENGTH|CHECK|' r'CHECKED|CHECKPOINT|CLASS|CLASS_ORIGIN|CLOB|CLOSE|CLUSTER|' r'COALSECE|COBOL|COLLATE|COLLATION|COLLATION_CATALOG|' r'COLLATION_NAME|COLLATION_SCHEMA|COLUMN|COLUMN_NAME|' r'COMMAND_FUNCTION|COMMAND_FUNCTION_CODE|COMMENT|COMMIT|' r'COMMITTED|COMPLETION|CONDITION_NUMBER|CONNECT|CONNECTION|' r'CONNECTION_NAME|CONSTRAINT|CONSTRAINTS|CONSTRAINT_CATALOG|' r'CONSTRAINT_NAME|CONSTRAINT_SCHEMA|CONSTRUCTOR|CONTAINS|' r'CONTINUE|CONVERSION|CONVERT|COPY|CORRESPONTING|COUNT|' r'CREATE|CREATEDB|CREATEUSER|CROSS|CUBE|CURRENT|CURRENT_DATE|' r'CURRENT_PATH|CURRENT_ROLE|CURRENT_TIME|CURRENT_TIMESTAMP|' r'CURRENT_USER|CURSOR|CURSOR_NAME|CYCLE|DATA|DATABASE|' r'DATETIME_INTERVAL_CODE|DATETIME_INTERVAL_PRECISION|DAY|' r'DEALLOCATE|DECLARE|DEFAULT|DEFAULTS|DEFERRABLE|DEFERRED|' r'DEFINED|DEFINER|DELETE|DELIMITER|DELIMITERS|DEREF|DESC|' r'DESCRIBE|DESCRIPTOR|DESTROY|DESTRUCTOR|DETERMINISTIC|' r'DIAGNOSTICS|DICTIONARY|DISCONNECT|DISPATCH|DISTINCT|DO|' r'DOMAIN|DROP|DYNAMIC|DYNAMIC_FUNCTION|DYNAMIC_FUNCTION_CODE|' r'EACH|ELSE|ENCODING|ENCRYPTED|END|END-EXEC|EQUALS|ESCAPE|EVERY|' r'EXCEPT|ESCEPTION|EXCLUDING|EXCLUSIVE|EXEC|EXECUTE|EXISTING|' r'EXISTS|EXPLAIN|EXTERNAL|EXTRACT|FALSE|FETCH|FINAL|FIRST|FOR|' r'FORCE|FOREIGN|FORTRAN|FORWARD|FOUND|FREE|FREEZE|FROM|FULL|' r'FUNCTION|G|GENERAL|GENERATED|GET|GLOBAL|GO|GOTO|GRANT|GRANTED|' r'GROUP|GROUPING|HANDLER|HAVING|HIERARCHY|HOLD|HOST|IDENTITY|' r'IGNORE|ILIKE|IMMEDIATE|IMMUTABLE|IMPLEMENTATION|IMPLICIT|IN|' r'INCLUDING|INCREMENT|INDEX|INDITCATOR|INFIX|INHERITS|INITIALIZE|' r'INITIALLY|INNER|INOUT|INPUT|INSENSITIVE|INSERT|INSTANTIABLE|' r'INSTEAD|INTERSECT|INTO|INVOKER|IS|ISNULL|ISOLATION|ITERATE|JOIN|' r'KEY|KEY_MEMBER|KEY_TYPE|LANCOMPILER|LANGUAGE|LARGE|LAST|' r'LATERAL|LEADING|LEFT|LENGTH|LESS|LEVEL|LIKE|LIMIT|LISTEN|LOAD|' 
r'LOCAL|LOCALTIME|LOCALTIMESTAMP|LOCATION|LOCATOR|LOCK|LOWER|' r'MAP|MATCH|MAX|MAXVALUE|MESSAGE_LENGTH|MESSAGE_OCTET_LENGTH|' r'MESSAGE_TEXT|METHOD|MIN|MINUTE|MINVALUE|MOD|MODE|MODIFIES|' r'MODIFY|MONTH|MORE|MOVE|MUMPS|NAMES|NATIONAL|NATURAL|NCHAR|' r'NCLOB|NEW|NEXT|NO|NOCREATEDB|NOCREATEUSER|NONE|NOT|NOTHING|' r'NOTIFY|NOTNULL|NULL|NULLABLE|NULLIF|OBJECT|OCTET_LENGTH|OF|OFF|' r'OFFSET|OIDS|OLD|ON|ONLY|OPEN|OPERATION|OPERATOR|OPTION|OPTIONS|' r'OR|ORDER|ORDINALITY|OUT|OUTER|OUTPUT|OVERLAPS|OVERLAY|OVERRIDING|' r'OWNER|PAD|PARAMETER|PARAMETERS|PARAMETER_MODE|PARAMATER_NAME|' r'PARAMATER_ORDINAL_POSITION|PARAMETER_SPECIFIC_CATALOG|' r'PARAMETER_SPECIFIC_NAME|PARAMATER_SPECIFIC_SCHEMA|PARTIAL|' r'PASCAL|PENDANT|PLACING|PLI|POSITION|POSTFIX|PRECISION|PREFIX|' r'PREORDER|PREPARE|PRESERVE|PRIMARY|PRIOR|PRIVILEGES|PROCEDURAL|' r'PROCEDURE|PUBLIC|READ|READS|RECHECK|RECURSIVE|REF|REFERENCES|' r'REFERENCING|REINDEX|RELATIVE|RENAME|REPEATABLE|REPLACE|RESET|' r'RESTART|RESTRICT|RESULT|RETURN|RETURNED_LENGTH|' r'RETURNED_OCTET_LENGTH|RETURNED_SQLSTATE|RETURNS|REVOKE|RIGHT|' r'ROLE|ROLLBACK|ROLLUP|ROUTINE|ROUTINE_CATALOG|ROUTINE_NAME|' r'ROUTINE_SCHEMA|ROW|ROWS|ROW_COUNT|RULE|SAVE_POINT|SCALE|SCHEMA|' r'SCHEMA_NAME|SCOPE|SCROLL|SEARCH|SECOND|SECURITY|SELECT|SELF|' r'SENSITIVE|SERIALIZABLE|SERVER_NAME|SESSION|SESSION_USER|SET|' r'SETOF|SETS|SHARE|SHOW|SIMILAR|SIMPLE|SIZE|SOME|SOURCE|SPACE|' r'SPECIFIC|SPECIFICTYPE|SPECIFIC_NAME|SQL|SQLCODE|SQLERROR|' r'SQLEXCEPTION|SQLSTATE|SQLWARNINIG|STABLE|START|STATE|STATEMENT|' r'STATIC|STATISTICS|STDIN|STDOUT|STORAGE|STRICT|STRUCTURE|STYPE|' r'SUBCLASS_ORIGIN|SUBLIST|SUBSTRING|SUM|SYMMETRIC|SYSID|SYSTEM|' r'SYSTEM_USER|TABLE|TABLE_NAME| TEMP|TEMPLATE|TEMPORARY|TERMINATE|' r'THAN|THEN|TIMESTAMP|TIMEZONE_HOUR|TIMEZONE_MINUTE|TO|TOAST|' r'TRAILING|TRANSATION|TRANSACTIONS_COMMITTED|' r'TRANSACTIONS_ROLLED_BACK|TRANSATION_ACTIVE|TRANSFORM|' r'TRANSFORMS|TRANSLATE|TRANSLATION|TREAT|TRIGGER|TRIGGER_CATALOG|' r'TRIGGER_NAME|TRIGGER_SCHEMA|TRIM|TRUE|TRUNCATE|TRUSTED|TYPE|' r'UNCOMMITTED|UNDER|UNENCRYPTED|UNION|UNIQUE|UNKNOWN|UNLISTEN|' r'UNNAMED|UNNEST|UNTIL|UPDATE|UPPER|USAGE|USER|' r'USER_DEFINED_TYPE_CATALOG|USER_DEFINED_TYPE_NAME|' r'USER_DEFINED_TYPE_SCHEMA|USING|VACUUM|VALID|VALIDATOR|VALUES|' r'VARIABLE|VERBOSE|VERSION|VIEW|VOLATILE|WHEN|WHENEVER|WHERE|' r'WITH|WITHOUT|WORK|WRITE|YEAR|ZONE)\b', Keyword), (r'(ARRAY|BIGINT|BINARY|BIT|BLOB|BOOLEAN|CHAR|CHARACTER|DATE|' r'DEC|DECIMAL|FLOAT|INT|INTEGER|INTERVAL|NUMBER|NUMERIC|REAL|' r'SERIAL|SMALLINT|VARCHAR|VARYING|INT8|SERIAL8|TEXT)\b', Name.Builtin), (r'[+*/<>=~!@#%^&|`?^-]', Operator), (r'[0-9]+', Number.Integer), # TODO: Backslash escapes? (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Symbol), # not a real string literal in ANSI SQL (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'[;:()\[\],\.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), (r'[^/\*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } class MySqlLexer(RegexLexer): """ Special lexer for MySQL. 
""" name = 'MySQL' aliases = ['mysql'] mimetypes = ['text/x-mysql'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s+', Text), (r'(#|--\s+).*?\n', Comment.Single), (r'/\*', Comment.Multiline, 'multiline-comments'), (r'[0-9]+', Number.Integer), (r'[0-9]*\.[0-9]+(e[+-][0-9]+)', Number.Float), # TODO: add backslash escapes (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Double), (r"`(``|[^`])*`", String.Symbol), (r'[+*/<>=~!@#%^&|`?^-]', Operator), (r'\b(tinyint|smallint|mediumint|int|integer|bigint|date|' r'datetime|time|bit|bool|tinytext|mediumtext|longtext|text|' r'tinyblob|mediumblob|longblob|blob|float|double|double\s+' r'precision|real|numeric|dec|decimal|timestamp|year|char|' r'varchar|varbinary|varcharacter|enum|set)(\b\s*)(\()?', bygroups(Keyword.Type, Text, Punctuation)), (r'\b(add|all|alter|analyze|and|as|asc|asensitive|before|between|' r'bigint|binary|blob|both|by|call|cascade|case|change|char|' r'character|check|collate|column|condition|constraint|continue|' r'convert|create|cross|current_date|current_time|' r'current_timestamp|current_user|cursor|database|databases|' r'day_hour|day_microsecond|day_minute|day_second|dec|decimal|' r'declare|default|delayed|delete|desc|describe|deterministic|' r'distinct|distinctrow|div|double|drop|dual|each|else|elseif|' r'enclosed|escaped|exists|exit|explain|fetch|float|float4|float8' r'|for|force|foreign|from|fulltext|grant|group|having|' r'high_priority|hour_microsecond|hour_minute|hour_second|if|' r'ignore|in|index|infile|inner|inout|insensitive|insert|int|' r'int1|int2|int3|int4|int8|integer|interval|into|is|iterate|' r'join|key|keys|kill|leading|leave|left|like|limit|lines|load|' r'localtime|localtimestamp|lock|long|loop|low_priority|match|' r'minute_microsecond|minute_second|mod|modifies|natural|' r'no_write_to_binlog|not|numeric|on|optimize|option|optionally|' r'or|order|out|outer|outfile|precision|primary|procedure|purge|' r'raid0|read|reads|real|references|regexp|release|rename|repeat|' r'replace|require|restrict|return|revoke|right|rlike|schema|' r'schemas|second_microsecond|select|sensitive|separator|set|' r'show|smallint|soname|spatial|specific|sql|sql_big_result|' r'sql_calc_found_rows|sql_small_result|sqlexception|sqlstate|' r'sqlwarning|ssl|starting|straight_join|table|terminated|then|' r'to|trailing|trigger|undo|union|unique|unlock|unsigned|update|' r'usage|use|using|utc_date|utc_time|utc_timestamp|values|' r'varying|when|where|while|with|write|x509|xor|year_month|' r'zerofill)\b', Keyword), # TODO: this list is not complete (r'\b(auto_increment|engine|charset|tables)\b', Keyword.Pseudo), (r'(true|false|null)', Name.Constant), (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'@[A-Za-z0-9]*[._]*[A-Za-z0-9]*', Name.Variable), (r'[;:()\[\],\.]', Punctuation) ], 'multiline-comments': [ (r'/\*', Comment.Multiline, 'multiline-comments'), (r'\*/', Comment.Multiline, '#pop'), (r'[^/\*]+', Comment.Multiline), (r'[/*]', Comment.Multiline) ] } class SqliteConsoleLexer(Lexer): """ Lexer for example sessions using sqlite3. 
*New in Pygments 0.11.* """ name = 'sqlite3con' aliases = ['sqlite3'] filenames = ['*.sqlite3-console'] mimetypes = ['text/x-sqlite3-console'] def get_tokens_unprocessed(self, data): sql = SqlLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(data): line = match.group() if line.startswith('sqlite> ') or line.startswith(' ...> '): insertions.append((len(curcode), [(0, Generic.Prompt, line[:8])])) curcode += line[8:] else: if curcode: for item in do_insertions(insertions, sql.get_tokens_unprocessed(curcode)): yield item curcode = '' insertions = [] if line.startswith('SQL error: '): yield (match.start(), Generic.Traceback, line) else: yield (match.start(), Generic.Output, line) if curcode: for item in do_insertions(insertions, sql.get_tokens_unprocessed(curcode)): yield item class BrainfuckLexer(RegexLexer): """ Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_ language. """ name = 'Brainfuck' aliases = ['brainfuck', 'bf'] filenames = ['*.bf', '*.b'] mimetypes = ['application/x-brainfuck'] tokens = { 'common': [ # use different colors for different instruction types (r'[.,]+', Name.Tag), (r'[+-]+', Name.Builtin), (r'[<>]+', Name.Variable), (r'[^.,+\-<>\[\]]+', Comment), ], 'root': [ (r'\[', Keyword, 'loop'), (r'\]', Error), include('common'), ], 'loop': [ (r'\[', Keyword, '#push'), (r'\]', Keyword, '#pop'), include('common'), ] } class BefungeLexer(RegexLexer): """ Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_ language. *New in Pygments 0.7.* """ name = 'Befunge' aliases = ['befunge'] filenames = ['*.befunge'] mimetypes = ['application/x-befunge'] tokens = { 'root': [ (r'[0-9a-f]', Number), (r'[\+\*/%!`-]', Operator), # Traditional math (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives (r'[|_mw]', Keyword), (r'[{}]', Name.Tag), # Befunge-98 stack ops (r'".*?"', String.Double), # Strings don't appear to allow escapes (r'\'.', String.Single), # Single character (r'[#;]', Comment), # Trampoline... depends on direction hit (r'[pg&~=@iotsy]', Keyword), # Misc (r'[()A-Z]', Comment), # Fingerprints (r'\s+', Text), # Whitespace doesn't matter ], } class BashLexer(RegexLexer): """ Lexer for (ba)sh shell scripts. 
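# --- Illustrative aside (added; not part of the original Pygments module) ---
# Sketch of how the SqliteConsoleLexer defined above splits a pasted sqlite3
# session into prompt, SQL and output tokens. The function name
# _demo_sqlite_session is hypothetical; get_tokens() is the standard Lexer API
# and yields (token_type, text) pairs.
def _demo_sqlite_session():
    session = (
        "sqlite> SELECT 1 + 1;\n"
        "2\n"
    )
    lexer = SqliteConsoleLexer()
    # First line becomes a Generic.Prompt token followed by SQL tokens,
    # the second line becomes Generic.Output.
    return list(lexer.get_tokens(session))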
*New in Pygments 0.6.* """ name = 'Bash' aliases = ['bash', 'sh'] filenames = ['*.sh', '*.ebuild', '*.eclass'] mimetypes = ['application/x-sh', 'application/x-shellscript'] tokens = { 'root': [ include('basic'), (r'\$\(\(', Keyword, 'math'), (r'\$\(', Keyword, 'paren'), (r'\${#?', Keyword, 'curly'), (r'`', String.Backtick, 'backticks'), include('data'), ], 'basic': [ (r'\b(if|fi|else|while|do|done|for|then|return|function|case|' r'select|continue|until|esac|elif)\s*\b', Keyword), (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|' r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|' r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|' r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|' r'shopt|source|suspend|test|time|times|trap|true|type|typeset|' r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)', Name.Builtin), (r'#.*\n', Comment), (r'\\[\w\W]', String.Escape), (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), (r'[\[\]{}()=]', Operator), (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), (r'&&|\|\|', Operator), ], 'data': [ (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), (r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), (r';', Text), (r'\s+', Text), (r'[^=\s\n\[\]{}()$"\'`\\<]+', Text), (r'\d+(?= |\Z)', Number), (r'\$#?(\w+|.)', Name.Variable), (r'<', Text), ], 'curly': [ (r'}', Keyword, '#pop'), (r':-', Keyword), (r'[a-zA-Z0-9_]+', Name.Variable), (r'[^}:"\'`$]+', Punctuation), (r':', Punctuation), include('root'), ], 'paren': [ (r'\)', Keyword, '#pop'), include('root'), ], 'math': [ (r'\)\)', Keyword, '#pop'), (r'[-+*/%^|&]|\*\*|\|\|', Operator), (r'\d+', Number), include('root'), ], 'backticks': [ (r'`', String.Backtick, '#pop'), include('root'), ], } def analyse_text(text): return shebang_matches(text, r'(ba|z|)sh') class BashSessionLexer(Lexer): """ Lexer for simplistic shell sessions. *New in Pygments 1.1.* """ name = 'Bash Session' aliases = ['console'] filenames = ['*.sh-session'] mimetypes = ['application/x-shell-session'] def get_tokens_unprocessed(self, text): bashlexer = BashLexer(**self.options) pos = 0 curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() m = re.match(r'^((?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)?|\[\S+[@:]' r'[^\n]+\].+)[$#%])(.*\n?)', line) if m: # To support output lexers (say diff output), the output # needs to be broken by prompts whenever the output lexer # changes. if not insertions: pos = match.start() insertions.append((len(curcode), [(0, Generic.Prompt, m.group(1))])) curcode += m.group(2) elif line.startswith('>'): insertions.append((len(curcode), [(0, Generic.Prompt, line[:1])])) curcode += line[1:] else: if insertions: toks = bashlexer.get_tokens_unprocessed(curcode) for i, t, v in do_insertions(insertions, toks): yield pos+i, t, v yield match.start(), Generic.Output, line insertions = [] curcode = '' if insertions: for i, t, v in do_insertions(insertions, bashlexer.get_tokens_unprocessed(curcode)): yield pos+i, t, v class BatchLexer(RegexLexer): """ Lexer for the DOS/Windows Batch file format. 
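# --- Illustrative aside (added; not part of the original Pygments module) ---
# BashLexer.analyse_text() above feeds Pygments' lexer guessing: a shebang is
# usually enough for guess_lexer() to pick the shell lexer. A minimal sketch;
# the helper name _demo_guess_shell is hypothetical, guess_lexer() is a real
# Pygments API.
def _demo_guess_shell():
    from pygments.lexers import guess_lexer
    script = "#!/bin/bash\necho \"hello from $HOSTNAME\"\n"
    lexer = guess_lexer(script)
    # Expected to resolve to the Bash lexer defined above (lexer.name == 'Bash').
    return lexer.name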
*New in Pygments 0.7.* """ name = 'Batchfile' aliases = ['bat'] filenames = ['*.bat', '*.cmd'] mimetypes = ['application/x-dos-batch'] flags = re.MULTILINE | re.IGNORECASE tokens = { 'root': [ # Lines can start with @ to prevent echo (r'^\s*@', Punctuation), (r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)), (r'".*?"', String.Double), (r"'.*?'", String.Single), # If made more specific, make sure you still allow expansions # like %~$VAR:zlt (r'%%?[~$:\w]+%?', Name.Variable), (r'::.*', Comment), # Technically :: only works at BOL (r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)), (r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)), (r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)), (r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|' r'setlocal|shift|errorlevel|exist|defined|cmdextversion|' r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword), (r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator), include('basic'), (r'.', Text), ], 'echo': [ # Escapes only valid within echo args? (r'\^\^|\^<|\^>|\^\|', String.Escape), (r'\n', Text, '#pop'), include('basic'), (r'[^\'"^]+', Text), ], 'basic': [ (r'".*?"', String.Double), (r"'.*?'", String.Single), (r'`.*?`', String.Backtick), (r'-?\d+', Number), (r',', Punctuation), (r'=', Operator), (r'/\S+', Name), (r':\w+', Name.Label), (r'\w:\w+', Text), (r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)), ], } class RedcodeLexer(RegexLexer): """ A simple Redcode lexer based on ICWS'94. Contributed by Adam Blinkinsop <[email protected]>. *New in Pygments 0.8.* """ name = 'Redcode' aliases = ['redcode'] filenames = ['*.cw'] opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD', 'JMP','JMZ','JMN','DJN','CMP','SLT','SPL', 'ORG','EQU','END'] modifiers = ['A','B','AB','BA','F','X','I'] tokens = { 'root': [ # Whitespace: (r'\s+', Text), (r';.*$', Comment.Single), # Lexemes: # Identifiers (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), (r'[A-Za-z_][A-Za-z_0-9]+', Name), # Operators (r'[-+*/%]', Operator), (r'[#$@<>]', Operator), # mode (r'[.,]', Punctuation), # mode # Numbers (r'[-+]?\d+', Number.Integer), ], } class MOOCodeLexer(RegexLexer): """ For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting language). *New in Pygments 0.9.* """ name = 'MOOCode' filenames = ['*.moo'] aliases = ['moocode'] mimetypes = ['text/x-moocode'] tokens = { 'root' : [ # Numbers (r'(0|[1-9][0-9_]*)', Number.Integer), # Strings (r'"(\\\\|\\"|[^"])*"', String), # exceptions (r'(E_PERM|E_DIV)', Name.Exception), # db-refs (r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity), # Keywords (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' r'|endwhile|break|continue|return|try' r'|except|endtry|finally|in)\b', Keyword), # builtins (r'(random|length)', Name.Builtin), # special variables (r'(player|caller|this|args)', Name.Variable.Instance), # skip whitespace (r'\s+', Text), (r'\n', Text), # other operators (r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator), # function call (r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)), # variables (r'([a-zA-Z_0-9]+)', Text), ] } class SmalltalkLexer(RegexLexer): """ For `Smalltalk <http://www.smalltalk.org/>`_ syntax. Contributed by Stefan Matthias Aust. Rewritten by Nils Winter. 
*New in Pygments 0.10.* """ name = 'Smalltalk' filenames = ['*.st'] aliases = ['smalltalk', 'squeak'] mimetypes = ['text/x-smalltalk'] tokens = { 'root' : [ (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), include('squeak fileout'), include('whitespaces'), include('method definition'), (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), include('objects'), (r'\^|\:=|\_', Operator), # temporaries (r'[\]({}.;!]', Text), ], 'method definition' : [ # Not perfect can't allow whitespaces at the beginning and the # without breaking everything (r'([a-zA-Z]+\w*:)(\s*)(\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', bygroups(Name.Function, Text, Name.Variable, Text)), ], 'blockvariables' : [ include('whitespaces'), (r'(:)(\s*)([A-Za-z\w]+)', bygroups(Operator, Text, Name.Variable)), (r'\|', Operator, '#pop'), (r'', Text, '#pop'), # else pop ], 'literals' : [ (r'\'[^\']*\'', String, 'afterobject'), (r'\$.', String.Char, 'afterobject'), (r'#\(', String.Symbol, 'parenth'), (r'\)', Text, 'afterobject'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), ], '_parenth_helper' : [ include('whitespaces'), (r'[-+*/\\~<>=|&#!?,@%\w+:]+', String.Symbol), # literals (r'\'[^\']*\'', String), (r'\$.', String.Char), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), (r'#*\(', String.Symbol, 'inner_parenth'), ], 'parenth' : [ # This state is a bit tricky since # we can't just pop this state (r'\)', String.Symbol, ('root','afterobject')), include('_parenth_helper'), ], 'inner_parenth': [ (r'\)', String.Symbol, '#pop'), include('_parenth_helper'), ], 'whitespaces' : [ # skip whitespace and comments (r'\s+', Text), (r'"[^"]*"', Comment), ], 'objects' : [ (r'\[', Text, 'blockvariables'), (r'\]', Text, 'afterobject'), (r'\b(self|super|true|false|nil|thisContext)\b', Name.Builtin.Pseudo, 'afterobject'), (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'), (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), (r'#("[^"]*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', String.Symbol, 'afterobject'), include('literals'), ], 'afterobject' : [ (r'! !$', Keyword , '#pop'), # squeak chunk delimeter include('whitespaces'), (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', Name.Builtin, '#pop'), (r'\b(new\b(?!:))', Name.Builtin), (r'\:=|\_', Operator, '#pop'), (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), (r'\b[a-zA-Z]+\w*', Name.Function), (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), (r'\.', Punctuation, '#pop'), (r';', Punctuation), (r'[\])}]', Text), (r'[\[({]', Text, '#pop'), ], 'squeak fileout' : [ # Squeak fileout format (optional) (r'^"[^"]*"!', Keyword), (r"^'[^']*'!", Keyword), (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), (r'^(!)(\w+(?: class)?)( methodsFor: )(\'[^\']*\')(.*?!)', bygroups(Keyword, Name.Class, Keyword, String, Keyword)), (r'^(\w+)( subclass: )(#\w+)' r'(\s+instanceVariableNames: )(.*?)' r'(\s+classVariableNames: )(.*?)' r'(\s+poolDictionaries: )(.*?)' r'(\s+category: )(.*?)(!)', bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, String, Keyword, String, Keyword, String, Keyword)), (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', bygroups(Name.Class, Keyword, String, Keyword)), (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), (r'! !$', Keyword), ], } class TcshLexer(RegexLexer): """ Lexer for tcsh scripts. 
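# --- Illustrative aside (added; not part of the original Pygments module) ---
# The name/aliases/filenames/mimetypes attributes on lexers such as the
# SmalltalkLexer above are what the lookup helpers in pygments.lexers rely on.
# A minimal sketch (the helper name _demo_lookup is hypothetical; both lookup
# functions are real Pygments APIs):
def _demo_lookup():
    from pygments.lexers import get_lexer_by_name, get_lexer_for_filename
    by_alias = get_lexer_by_name('smalltalk')          # matches the aliases list
    by_filename = get_lexer_for_filename('Point.st')   # matches the '*.st' pattern
    return by_alias.name, by_filename.name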
*New in Pygments 0.10.* """ name = 'Tcsh' aliases = ['tcsh', 'csh'] filenames = ['*.tcsh', '*.csh'] mimetypes = ['application/x-csh'] tokens = { 'root': [ include('basic'), (r'\$\(', Keyword, 'paren'), (r'\${#?', Keyword, 'curly'), (r'`', String.Backtick, 'backticks'), include('data'), ], 'basic': [ (r'\b(if|endif|else|while|then|foreach|case|default|' r'continue|goto|breaksw|end|switch|endsw)\s*\b', Keyword), (r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|' r'complete|dirs|echo|echotc|eval|exec|exit|' r'fg|filetest|getxvers|glob|getspath|hashstat|history|hup|inlib|jobs|kill|' r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|' r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|set|shift|' r'sched|setenv|setpath|settc|setty|setxvers|shift|source|stop|suspend|' r'source|suspend|telltc|time|' r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|' r'ver|wait|warp|watchlog|where|which)\s*\b', Name.Builtin), (r'#.*\n', Comment), (r'\\[\w\W]', String.Escape), (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)), (r'[\[\]{}()=]+', Operator), (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String), ], 'data': [ (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), (r'\s+', Text), (r'[^=\s\n\[\]{}()$"\'`\\]+', Text), (r'\d+(?= |\Z)', Number), (r'\$#?(\w+|.)', Name.Variable), ], 'curly': [ (r'}', Keyword, '#pop'), (r':-', Keyword), (r'[a-zA-Z0-9_]+', Name.Variable), (r'[^}:"\'`$]+', Punctuation), (r':', Punctuation), include('root'), ], 'paren': [ (r'\)', Keyword, '#pop'), include('root'), ], 'backticks': [ (r'`', String.Backtick, '#pop'), include('root'), ], } class LogtalkLexer(RegexLexer): """ For `Logtalk <http://logtalk.org/>`_ source code. *New in Pygments 0.10.* """ name = 'Logtalk' aliases = ['logtalk'] filenames = ['*.lgt'] mimetypes = ['text/x-logtalk'] tokens = { 'root': [ # Directives (r'^\s*:-\s',Punctuation,'directive'), # Comments (r'%.*?\n', Comment), (r'/\*(.|\n)*?\*/',Comment), # Whitespace (r'\n', Text), (r'\s+', Text), # Numbers (r"0'.", Number), (r'0b[01]+', Number), (r'0o[0-7]+', Number), (r'0x[0-9a-fA-F]+', Number), (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), # Variables (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), # Event handlers (r'(after|before)(?=[(])', Keyword), # Execution-context methods (r'(parameter|this|se(lf|nder))(?=[(])', Keyword), # Reflection (r'(current_predicate|predicate_property)(?=[(])', Keyword), # DCGs and term expansion (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword), # Entity (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword), (r'(object|protocol|category)_property(?=[(])', Keyword), # Entity relations (r'complements_object(?=[(])', Keyword), (r'extends_(object|protocol|category)(?=[(])', Keyword), (r'imp(lements_protocol|orts_category)(?=[(])', Keyword), (r'(instantiat|specializ)es_class(?=[(])', Keyword), # Events (r'(current_event|(abolish|define)_events)(?=[(])', Keyword), # Flags (r'(current|set)_logtalk_flag(?=[(])', Keyword), # Compiling, loading, and library paths (r'logtalk_(compile|l(ibrary_path|oad))(?=[(])', Keyword), # Database (r'(clause|retract(all)?)(?=[(])', Keyword), (r'a(bolish|ssert(a|z))(?=[(])', Keyword), # Control (r'(ca(ll|tch)|throw)(?=[(])', Keyword), (r'(fail|true)\b', Keyword), # All solutions (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword), # Multi-threading meta-predicates (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword), # Term unification 
(r'unify_with_occurs_check(?=[(])', Keyword), # Term creation and decomposition (r'(functor|arg|copy_term)(?=[(])', Keyword), # Evaluable functors (r'(rem|mod|abs|sign)(?=[(])', Keyword), (r'float(_(integer|fractional)_part)?(?=[(])', Keyword), (r'(floor|truncate|round|ceiling)(?=[(])', Keyword), # Other arithmetic functors (r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword), # Term testing (r'(var|atom(ic)?|integer|float|compound|n(onvar|umber))(?=[(])', Keyword), # Stream selection and control (r'(curren|se)t_(in|out)put(?=[(])', Keyword), (r'(open|close)(?=[(])', Keyword), (r'flush_output(?=[(])', Keyword), (r'(at_end_of_stream|flush_output)\b', Keyword), (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword), # Character and byte input/output (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword), (r'\bnl\b', Keyword), # Term input/output (r'read(_term)?(?=[(])', Keyword), (r'write(q|_(canonical|term))?(?=[(])', Keyword), (r'(current_)?op(?=[(])', Keyword), (r'(current_)?char_conversion(?=[(])', Keyword), # Atomic term processing (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword), (r'(char_code|sub_atom)(?=[(])', Keyword), (r'number_c(har|ode)s(?=[(])', Keyword), # Implementation defined hooks functions (r'(se|curren)t_prolog_flag(?=[(])', Keyword), (r'\bhalt\b', Keyword), (r'halt(?=[(])', Keyword), # Message sending operators (r'(::|:|\^\^)', Operator), # External call (r'[{}]', Keyword), # Logic and control (r'\bonce(?=[(])', Keyword), (r'\brepeat\b', Keyword), # Bitwise functors (r'(>>|<<|/\\|\\\\|\\)', Operator), # Arithemtic evaluation (r'\bis\b', Keyword), # Arithemtic comparison (r'(=:=|=\\=|<|=<|>=|>)', Operator), # Term creation and decomposition (r'=\.\.', Operator), # Term unification (r'(=|\\=)', Operator), # Term comparison (r'(==|\\==|@=<|@<|@>=|@>)', Operator), # Evaluable functors (r'(//|[-+*/])', Operator), (r'\b(mod|rem)\b', Operator), # Other arithemtic functors (r'\b\*\*\b', Operator), # DCG rules (r'-->', Operator), # Control constructs (r'([!;]|->)', Operator), # Logic and control (r'\\+', Operator), # Mode operators (r'[?@]', Operator), # Strings (r'"(\\\\|\\"|[^"])*"', String), # Ponctuation (r'[()\[\],.|]', Text), # Atoms (r"[a-z][a-zA-Z0-9_]*", Text), (r"[']", String, 'quoted_atom'), ], 'quoted_atom': [ (r"['][']", String), (r"[']", String, '#pop'), (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape), (r"[^\\'\n]+", String), (r'\\', String), ], 'directive': [ # Conditional compilation directives (r'(el)?if(?=[(])', Keyword, 'root'), (r'(e(lse|ndif))[.]', Keyword, 'root'), # Entity directives (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'), (r'(end_(category|object|protocol))[.]',Keyword, 'root'), # Predicate scope directives (r'(public|protected|private)(?=[(])', Keyword, 'root'), # Other directives (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'), (r'in(fo|itialization)(?=[(])', Keyword, 'root'), (r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'), (r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|' r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'), (r'op(?=[(])', Keyword, 'root'), (r'(calls|reexport|use(s|_module))(?=[(])', Keyword, 'root'), (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'), (r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'), ], 'entityrelations': [ (r'(extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword), # Numbers (r"0'.", Number), (r'0b[01]+', Number), (r'0o[0-7]+', Number), (r'0x[0-9a-fA-F]+', Number), 
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number), # Variables (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable), # Atoms (r"[a-z][a-zA-Z0-9_]*", Text), (r"[']", String, 'quoted_atom'), # Strings (r'"(\\\\|\\"|[^"])*"', String), # End of entity-opening directive (r'([)]\.)', Text, 'root'), # Scope operator (r'(::)', Operator), # Ponctuation (r'[()\[\],.|]', Text), # Comments (r'%.*?\n', Comment), (r'/\*(.|\n)*?\*/',Comment), # Whitespace (r'\n', Text), (r'\s+', Text), ] } def analyse_text(text): if ':- object(' in text: return True if ':- protocol(' in text: return True if ':- category(' in text: return True return False def _shortened(word): dpos = word.find('$') return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b' for i in range(len(word), dpos, -1)]) def _shortened_many(*words): return '|'.join(map(_shortened, words)) class GnuplotLexer(RegexLexer): """ For `Gnuplot <http://gnuplot.info/>`_ plotting scripts. *New in Pygments 0.11.* """ name = 'Gnuplot' aliases = ['gnuplot'] filenames = ['*.plot', '*.plt'] mimetypes = ['text/x-gnuplot'] tokens = { 'root': [ include('whitespace'), (_shortened('bi$nd'), Keyword, 'bind'), (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'), (_shortened('f$it'), Keyword, 'fit'), (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'), (r'else\b', Keyword), (_shortened('pa$use'), Keyword, 'pause'), (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'), (_shortened('sa$ve'), Keyword, 'save'), (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')), (_shortened_many('sh$ow', 'uns$et'), Keyword, ('noargs', 'optionarg')), (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear', 'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int', 'pwd$', 're$read', 'res$et', 'scr$eendump', 'she$ll', 'sy$stem', 'up$date'), Keyword, 'genericargs'), (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump', 'she$ll', 'test$'), Keyword, 'noargs'), ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)', bygroups(Name.Variable, Text, Operator), 'genericargs'), ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)', bygroups(Name.Function, Text, Operator), 'genericargs'), (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros (r';', Keyword), ], 'comment': [ (r'[^\\\n]', Comment), (r'\\\n', Comment), (r'\\', Comment), # don't add the newline to the Comment token ('', Comment, '#pop'), ], 'whitespace': [ ('#', Comment, 'comment'), (r'[ \t\v\f]+', Text), ], 'noargs': [ include('whitespace'), # semicolon and newline end the argument list (r';', Punctuation, '#pop'), (r'\n', Text, '#pop'), ], 'dqstring': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash (r'\n', String, '#pop'), # newline ends the string too ], 'sqstring': [ (r"''", String), # escaped single quote (r"'", String, '#pop'), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # normal backslash (r'\n', String, '#pop'), # newline ends the string too ], 'genericargs': [ include('noargs'), (r'"', String, 'dqstring'), (r"'", String, 'sqstring'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), (r'(\d+\.\d*|\.\d+)', Number.Float), (r'-?\d+', Number.Integer), ('[,.~!%^&*+=|?:<>/-]', Operator), ('[{}()\[\]]', Punctuation), (r'(eq|ne)\b', Operator.Word), (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros (r'\\\n', 
Text), ], 'optionarg': [ include('whitespace'), (_shortened_many( "a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der", "box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta", "data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign", "fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid", "hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle", "la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale", "mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin", "rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot", "mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics", "nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics", "mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput", "pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot", "poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze", "st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs", "ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le", "v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta", "yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel", "yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs", "x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs", "zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs", "x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs", "noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs", "xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs", "noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs", "cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange", "y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange", "vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis", "zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'), ], 'bind': [ ('!', Keyword, '#pop'), (_shortened('all$windows'), Name.Builtin), include('genericargs'), ], 'quit': [ (r'gnuplot\b', Keyword), include('noargs'), ], 'fit': [ (r'via\b', Name.Builtin), include('plot'), ], 'if': [ (r'\)', Punctuation, '#pop'), include('genericargs'), ], 'pause': [ (r'(mouse|any|button1|button2|button3)\b', Name.Builtin), (_shortened('key$press'), Name.Builtin), include('genericargs'), ], 'plot': [ (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', 'mat$rix', 's$mooth', 'thru$', 't$itle', 'not$itle', 'u$sing', 'w$ith'), Name.Builtin), include('genericargs'), ], 'save': [ (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), Name.Builtin), include('genericargs'), ], } class PovrayLexer(RegexLexer): """ For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files. 
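# --- Illustrative aside (added; not part of the original Pygments module) ---
# The _shortened()/_shortened_many() helpers defined just before GnuplotLexer
# expand a '$'-marked word into an alternation that accepts every gnuplot
# abbreviation, longest form first. A minimal sketch of what they produce
# (the name _demo_shortened is hypothetical; _shortened and the module-level
# re import are from the code above):
def _demo_shortened():
    pattern = _shortened('bi$nd')   # -> r'bind\b|bin\b|bi\b'
    matches = [w for w in ('b', 'bi', 'bin', 'bind') if re.match(pattern, w)]
    # expected: ['bi', 'bin', 'bind'] -- 'b' alone is too short to match
    return matches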
*New in Pygments 0.11.* """ name = 'POVRay' aliases = ['pov'] filenames = ['*.pov', '*.inc'] mimetypes = ['text/x-povray'] tokens = { 'root': [ (r'/\*[\w\W]*?\*/', Comment.Multiline), (r'//.*\n', Comment.Single), (r'(?s)"(?:\\.|[^"\\])+"', String.Double), (r'#(debug|default|else|end|error|fclose|fopen|if|ifdef|ifndef|' r'include|range|read|render|statistics|switch|undef|version|' r'warning|while|write|define|macro|local|declare)', Comment.Preproc), (r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|' r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|' r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|' r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|' r'attenuating|average|background|black_hole|blue|blur_samples|' r'bounded_by|box_mapping|bozo|break|brick|brick_size|' r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|' r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|' r'color|color_map|colour|colour_map|component|composite|concat|' r'confidence|conic_sweep|constant|control0|control1|cos|cosh|' r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|' r'debug|declare|default|degrees|dents|diffuse|direction|' r'distance|distance_maximum|div|dust|dust_type|eccentricity|' r'else|emitting|end|error|error_bound|exp|exponent|' r'fade_distance|fade_power|falloff|falloff_angle|false|' r'file_exists|filter|finish|fisheye|flatness|flip|floor|' r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|' r'global_settings|glowing|gradient|granite|gray_threshold|' r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|' r'if|ifdef|iff|image_map|incidence|include|int|interpolate|' r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|' r'linear|linear_spline|linear_sweep|location|log|looks_like|' r'look_at|low_error_factor|mandel|map_type|marble|material_map|' r'matrix|max|max_intersections|max_iteration|max_trace_level|' r'max_value|metallic|min|minimum_reuse|mod|mortar|' r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|' r'octaves|off|offset|omega|omnimax|on|once|onion|open|' r'orthographic|panoramic|pattern1|pattern2|pattern3|' r'perspective|pgm|phase|phong|phong_size|pi|pigment|' r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|' r'precision|pwr|quadratic_spline|quaternion|quick_color|' r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|' r'ramp_wave|rand|range|reciprocal|recursion_limit|red|' r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|' r'right|ripples|rotate|roughness|samples|scale|scallop_wave|' r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|' r'slice|slope_map|smooth|specular|spherical_mapping|spiral|' r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|' r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|' r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|' r'test_camera_4|texture|texture_map|tga|thickness|threshold|' r'tightness|tile2|tiles|track|transform|translate|transmit|' r'triangle_wave|true|ttf|turbulence|turb_depth|type|' r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|' r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|' r'vnormalize|volume_object|volume_rendered|vol_with_light|' r'vrotate|v_steps|warning|warp|water_level|waves|while|width|' r'wood|wrinkles|yes)\b', Keyword), (r'bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|' r'disc|height_field|intersection|julia_fractal|lathe|' r'light_source|merge|mesh|object|plane|poly|polygon|prism|' 
r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|' r'text|torus|triangle|union', Name.Builtin), # TODO: <=, etc (r'[\[\](){}<>;,]', Punctuation), (r'[-+*/=]', Operator), (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo), (r'[a-zA-Z_][a-zA-Z_0-9]*', Name), (r'[0-9]+\.[0-9]*', Number.Float), (r'\.[0-9]+', Number.Float), (r'[0-9]+', Number.Integer), (r'\s+', Text), ] } class AppleScriptLexer(RegexLexer): """ For `AppleScript source code <http://developer.apple.com/documentation/AppleScript/ Conceptual/AppleScriptLangGuide>`_, including `AppleScript Studio <http://developer.apple.com/documentation/AppleScript/ Reference/StudioReference>`_. Contributed by Andreas Amann <[email protected]>. """ name = 'AppleScript' aliases = ['applescript'] filenames = ['*.applescript'] flags = re.MULTILINE | re.DOTALL Identifiers = r'[a-zA-Z]\w*' Literals = ['AppleScript', 'current application', 'false', 'linefeed', 'missing value', 'pi','quote', 'result', 'return', 'space', 'tab', 'text item delimiters', 'true', 'version'] Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ', 'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ', 'real ', 'record ', 'reference ', 'RGB color ', 'script ', 'text ', 'unit types', '(Unicode )?text', 'string'] BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month', 'paragraph', 'word', 'year'] HandlerParams = ['about', 'above', 'against', 'apart from', 'around', 'aside from', 'at', 'below', 'beneath', 'beside', 'between', 'for', 'given', 'instead of', 'on', 'onto', 'out of', 'over', 'since'] Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL', 'choose application', 'choose color', 'choose file( name)?', 'choose folder', 'choose from list', 'choose remote application', 'clipboard info', 'close( access)?', 'copy', 'count', 'current date', 'delay', 'delete', 'display (alert|dialog)', 'do shell script', 'duplicate', 'exists', 'get eof', 'get volume settings', 'info for', 'launch', 'list (disks|folder)', 'load script', 'log', 'make', 'mount volume', 'new', 'offset', 'open( (for access|location))?', 'path to', 'print', 'quit', 'random number', 'read', 'round', 'run( script)?', 'say', 'scripting components', 'set (eof|the clipboard to|volume)', 'store script', 'summarize', 'system attribute', 'system info', 'the clipboard', 'time to GMT', 'write', 'quoted form'] References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back', 'before', 'behind', 'every', 'front', 'index', 'last', 'middle', 'some', 'that', 'through', 'thru', 'where', 'whose'] Operators = ["and", "or", "is equal", "equals", "(is )?equal to", "is not", "isn't", "isn't equal( to)?", "is not equal( to)?", "doesn't equal", "does not equal", "(is )?greater than", "comes after", "is not less than or equal( to)?", "isn't less than or equal( to)?", "(is )?less than", "comes before", "is not greater than or equal( to)?", "isn't greater than or equal( to)?", "(is )?greater than or equal( to)?", "is not less than", "isn't less than", "does not come before", "doesn't come before", "(is )?less than or equal( to)?", "is not greater than", "isn't greater than", "does not come after", "doesn't come after", "starts? with", "begins? with", "ends? 
with", "contains?", "does not contain", "doesn't contain", "is in", "is contained by", "is not in", "is not contained by", "isn't contained by", "div", "mod", "not", "(a )?(ref( to)?|reference to)", "is", "does"] Control = ['considering', 'else', 'error', 'exit', 'from', 'if', 'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to', 'try', 'until', 'using terms from', 'while', 'whith', 'with timeout( of)?', 'with transaction', 'by', 'continue', 'end', 'its?', 'me', 'my', 'return', 'of' , 'as'] Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get'] Reserved = ['but', 'put', 'returning', 'the'] StudioClasses = ['action cell', 'alert reply', 'application', 'box', 'browser( cell)?', 'bundle', 'button( cell)?', 'cell', 'clip view', 'color well', 'color-panel', 'combo box( item)?', 'control', 'data( (cell|column|item|row|source))?', 'default entry', 'dialog reply', 'document', 'drag info', 'drawer', 'event', 'font(-panel)?', 'formatter', 'image( (cell|view))?', 'matrix', 'menu( item)?', 'item', 'movie( view)?', 'open-panel', 'outline view', 'panel', 'pasteboard', 'plugin', 'popup button', 'progress indicator', 'responder', 'save-panel', 'scroll view', 'secure text field( cell)?', 'slider', 'sound', 'split view', 'stepper', 'tab view( item)?', 'table( (column|header cell|header view|view))', 'text( (field( cell)?|view))?', 'toolbar( item)?', 'user-defaults', 'view', 'window'] StudioEvents = ['accept outline drop', 'accept table drop', 'action', 'activated', 'alert ended', 'awake from nib', 'became key', 'became main', 'begin editing', 'bounds changed', 'cell value', 'cell value changed', 'change cell value', 'change item value', 'changed', 'child of item', 'choose menu item', 'clicked', 'clicked toolbar item', 'closed', 'column clicked', 'column moved', 'column resized', 'conclude drop', 'data representation', 'deminiaturized', 'dialog ended', 'document nib name', 'double clicked', 'drag( (entered|exited|updated))?', 'drop', 'end editing', 'exposed', 'idle', 'item expandable', 'item value', 'item value changed', 'items changed', 'keyboard down', 'keyboard up', 'launched', 'load data representation', 'miniaturized', 'mouse down', 'mouse dragged', 'mouse entered', 'mouse exited', 'mouse moved', 'mouse up', 'moved', 'number of browser rows', 'number of items', 'number of rows', 'open untitled', 'opened', 'panel ended', 'parameters updated', 'plugin loaded', 'prepare drop', 'prepare outline drag', 'prepare outline drop', 'prepare table drag', 'prepare table drop', 'read from file', 'resigned active', 'resigned key', 'resigned main', 'resized( sub views)?', 'right mouse down', 'right mouse dragged', 'right mouse up', 'rows changed', 'scroll wheel', 'selected tab view item', 'selection changed', 'selection changing', 'should begin editing', 'should close', 'should collapse item', 'should end editing', 'should expand item', 'should open( untitled)?', 'should quit( after last window closed)?', 'should select column', 'should select item', 'should select row', 'should select tab view item', 'should selection change', 'should zoom', 'shown', 'update menu item', 'update parameters', 'update toolbar item', 'was hidden', 'was miniaturized', 'will become active', 'will close', 'will dismiss', 'will display browser cell', 'will display cell', 'will display item cell', 'will display outline cell', 'will finish launching', 'will hide', 'will miniaturize', 'will move', 'will open', 'will pop up', 'will quit', 'will resign active', 'will resize( sub views)?', 'will select tab view item', 'will show', 
'will zoom', 'write to file', 'zoomed'] StudioCommands = ['animate', 'append', 'call method', 'center', 'close drawer', 'close panel', 'display', 'display alert', 'display dialog', 'display panel', 'go', 'hide', 'highlight', 'increment', 'item for', 'load image', 'load movie', 'load nib', 'load panel', 'load sound', 'localized string', 'lock focus', 'log', 'open drawer', 'path for', 'pause', 'perform action', 'play', 'register', 'resume', 'scroll', 'select( all)?', 'show', 'size to fit', 'start', 'step back', 'step forward', 'stop', 'synchronize', 'unlock focus', 'update'] StudioProperties = ['accepts arrow key', 'action method', 'active', 'alignment', 'allowed identifiers', 'allows branch selection', 'allows column reordering', 'allows column resizing', 'allows column selection', 'allows customization', 'allows editing text attributes', 'allows empty selection', 'allows mixed state', 'allows multiple selection', 'allows reordering', 'allows undo', 'alpha( value)?', 'alternate image', 'alternate increment value', 'alternate title', 'animation delay', 'associated file name', 'associated object', 'auto completes', 'auto display', 'auto enables items', 'auto repeat', 'auto resizes( outline column)?', 'auto save expanded items', 'auto save name', 'auto save table columns', 'auto saves configuration', 'auto scroll', 'auto sizes all columns to fit', 'auto sizes cells', 'background color', 'bezel state', 'bezel style', 'bezeled', 'border rect', 'border type', 'bordered', 'bounds( rotation)?', 'box type', 'button returned', 'button type', 'can choose directories', 'can choose files', 'can draw', 'can hide', 'cell( (background color|size|type))?', 'characters', 'class', 'click count', 'clicked( data)? column', 'clicked data item', 'clicked( data)? row', 'closeable', 'collating', 'color( (mode|panel))', 'command key down', 'configuration', 'content(s| (size|view( margins)?))?', 'context', 'continuous', 'control key down', 'control size', 'control tint', 'control view', 'controller visible', 'coordinate system', 'copies( on scroll)?', 'corner view', 'current cell', 'current column', 'current( field)? editor', 'current( menu)? item', 'current row', 'current tab view item', 'data source', 'default identifiers', 'delta (x|y|z)', 'destination window', 'directory', 'display mode', 'displayed cell', 'document( (edited|rect|view))?', 'double value', 'dragged column', 'dragged distance', 'dragged items', 'draws( cell)? background', 'draws grid', 'dynamically scrolls', 'echos bullets', 'edge', 'editable', 'edited( data)? column', 'edited data item', 'edited( data)? 
row', 'enabled', 'enclosing scroll view', 'ending page', 'error handling', 'event number', 'event type', 'excluded from windows menu', 'executable path', 'expanded', 'fax number', 'field editor', 'file kind', 'file name', 'file type', 'first responder', 'first visible column', 'flipped', 'floating', 'font( panel)?', 'formatter', 'frameworks path', 'frontmost', 'gave up', 'grid color', 'has data items', 'has horizontal ruler', 'has horizontal scroller', 'has parent data item', 'has resize indicator', 'has shadow', 'has sub menu', 'has vertical ruler', 'has vertical scroller', 'header cell', 'header view', 'hidden', 'hides when deactivated', 'highlights by', 'horizontal line scroll', 'horizontal page scroll', 'horizontal ruler view', 'horizontally resizable', 'icon image', 'id', 'identifier', 'ignores multiple clicks', 'image( (alignment|dims when disabled|frame style|' 'scaling))?', 'imports graphics', 'increment value', 'indentation per level', 'indeterminate', 'index', 'integer value', 'intercell spacing', 'item height', 'key( (code|equivalent( modifier)?|window))?', 'knob thickness', 'label', 'last( visible)? column', 'leading offset', 'leaf', 'level', 'line scroll', 'loaded', 'localized sort', 'location', 'loop mode', 'main( (bunde|menu|window))?', 'marker follows cell', 'matrix mode', 'maximum( content)? size', 'maximum visible columns', 'menu( form representation)?', 'miniaturizable', 'miniaturized', 'minimized image', 'minimized title', 'minimum column width', 'minimum( content)? size', 'modal', 'modified', 'mouse down state', 'movie( (controller|file|rect))?', 'muted', 'name', 'needs display', 'next state', 'next text', 'number of tick marks', 'only tick mark values', 'opaque', 'open panel', 'option key down', 'outline table column', 'page scroll', 'pages across', 'pages down', 'palette label', 'pane splitter', 'parent data item', 'parent window', 'pasteboard', 'path( (names|separator))?', 'playing', 'plays every frame', 'plays selection only', 'position', 'preferred edge', 'preferred type', 'pressure', 'previous text', 'prompt', 'properties', 'prototype cell', 'pulls down', 'rate', 'released when closed', 'repeated', 'requested print time', 'required file type', 'resizable', 'resized column', 'resource path', 'returns records', 'reuses columns', 'rich text', 'roll over', 'row height', 'rulers visible', 'save panel', 'scripts path', 'scrollable', 'selectable( identifiers)?', 'selected cell', 'selected( data)? columns?', 'selected data items?', 'selected( data)? 
rows?', 'selected item identifier', 'selection by rect', 'send action on arrow key', 'sends action when done editing', 'separates columns', 'separator item', 'sequence number', 'services menu', 'shared frameworks path', 'shared support path', 'sheet', 'shift key down', 'shows alpha', 'shows state by', 'size( mode)?', 'smart insert delete enabled', 'sort case sensitivity', 'sort column', 'sort order', 'sort type', 'sorted( data rows)?', 'sound', 'source( mask)?', 'spell checking enabled', 'starting page', 'state', 'string value', 'sub menu', 'super menu', 'super view', 'tab key traverses cells', 'tab state', 'tab type', 'tab view', 'table view', 'tag', 'target( printer)?', 'text color', 'text container insert', 'text container origin', 'text returned', 'tick mark position', 'time stamp', 'title(d| (cell|font|height|position|rect))?', 'tool tip', 'toolbar', 'trailing offset', 'transparent', 'treat packages as directories', 'truncated labels', 'types', 'unmodified characters', 'update views', 'use sort indicator', 'user defaults', 'uses data source', 'uses ruler', 'uses threaded animation', 'uses title from previous column', 'value wraps', 'version', 'vertical( (line scroll|page scroll|ruler view))?', 'vertically resizable', 'view', 'visible( document rect)?', 'volume', 'width', 'window', 'windows menu', 'wraps', 'zoomable', 'zoomed'] tokens = { 'root': [ (r'\s+', Text), (ur'¬\n', String.Escape), (r"'s\s+", Text), # This is a possessive, consider moving (r'(--|#).*?$', Comment), (r'\(\*', Comment.Multiline, 'comment'), (r'[\(\){}!,.:]', Punctuation), (ur'(«)([^»]+)(»)', bygroups(Text, Name.Builtin, Text)), (r'\b((?:considering|ignoring)\s*)' r'(application responses|case|diacriticals|hyphens|' r'numeric strings|punctuation|white space)', bygroups(Keyword, Name.Builtin)), (ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator), (r"\b(%s)\b" % '|'.join(Operators), Operator.Word), (r'^(\s*(?:on|end)\s+)' r'(%s)' % '|'.join(StudioEvents), bygroups(Keyword, Name.Function)), (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)), (r'\b(as )(%s)\b' % '|'.join(Classes), bygroups(Keyword, Name.Class)), (r'\b(%s)\b' % '|'.join(Literals), Name.Constant), (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin), (r'\b(%s)\b' % '|'.join(Control), Keyword), (r'\b(%s)\b' % '|'.join(Declarations), Keyword), (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin), (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin), (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin), (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute), (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin), (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin), (r'\b(%s)\b' % '|'.join(References), Name.Builtin), (r'"(\\\\|\\"|[^"])*"', String.Double), (r'\b(%s)\b' % Identifiers, Name.Variable), (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float), (r'[-+]?\d+', Number.Integer), ], 'comment': [ ('\(\*', Comment.Multiline, '#push'), ('\*\)', Comment.Multiline, '#pop'), ('[^*(]+', Comment.Multiline), ('[*(]', Comment.Multiline), ], } class ModelicaLexer(RegexLexer): """ For `Modelica <http://www.modelica.org/>`_ source code. 
*New in Pygments 1.1.* """ name = 'Modelica' aliases = ['modelica'] filenames = ['*.mo'] mimetypes = ['text/x-modelica'] flags = re.IGNORECASE | re.DOTALL tokens = { 'whitespace': [ (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment), ], 'statements': [ (r'"', String, 'string'), (r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+)', Number.Float), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\]{},.;]', Punctuation), (r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin), (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')" r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class), (r"('[\w\+\-\*\/\^]+'|\w+)", Name) ], 'root': [ include('whitespace'), include('keywords'), include('functions'), include('operators'), include('classes'), (r'("<html>|<html>)', Name.Tag, 'html-content'), include('statements') ], 'keywords': [ (r'(algorithm|annotation|break|connect|constant|constrainedby|' r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|' r'end|equation|exit|expandable|extends|' r'external|false|final|flow|for|if|import|in|inner|input|' r'loop|nondiscrete|outer|output|parameter|partial|' r'protected|public|redeclare|replaceable|stream|time|then|true|' r'when|while|within)\b', Keyword) ], 'functions': [ (r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|' r'cross|div|exp|floor|log|log10|mod|rem|sign|sin|sinh|size|' r'sqrt|tan|tanh|zeros)\b', Name.Function) ], 'operators': [ (r'(and|assert|cardinality|change|delay|der|edge|initial|' r'noEvent|not|or|pre|reinit|return|sample|smooth|' r'terminal|terminate)\b', Name.Builtin) ], 'classes': [ (r'(block|class|connector|function|model|package|' r'record|type)\b', Name.Class) ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String) # stray backslash ], 'html-content': [ (r'<\s*/\s*html\s*>', Name.Tag, '#pop'), (r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)), ] } class RebolLexer(RegexLexer): """ A `REBOL <http://www.rebol.com/>`_ lexer. 
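# --- Illustrative aside (added; not part of the original Pygments module) ---
# The ModelicaLexer above hands "<html> ... </html>" annotation bodies to
# HtmlLexer via using(HtmlLexer). A minimal sketch of running the lexer end to
# end (the name _demo_modelica is hypothetical; highlight() and HtmlFormatter
# are real Pygments APIs):
def _demo_modelica():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    source = (
        "model FirstOrder\n"
        "  parameter Real k = 1.0;\n"
        "  Real x(start=1);\n"
        "equation\n"
        "  der(x) = -k*x;\n"
        "end FirstOrder;\n"
    )
    # noclasses=True inlines the styles so the returned HTML is self-contained.
    return highlight(source, ModelicaLexer(), HtmlFormatter(noclasses=True))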
*New in Pygments 1.1.* """ name = 'REBOL' aliases = ['rebol'] filenames = ['*.r', '*.r3'] mimetypes = ['text/x-rebol'] flags = re.IGNORECASE | re.MULTILINE re.IGNORECASE escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)' def word_callback(lexer, match): word = match.group() if re.match(".*:$", word): yield match.start(), Generic.Subheading, word elif re.match( r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' r'while|compress|decompress|secure|open|close|read|read-io|' r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' r'browse|launch|stats|get-modes|set-modes|to-local-file|' r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' r'hide|draw|show|size-text|textinfo|offset-to-caret|' r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' r'dsa-make-key|dsa-generate-key|dsa-make-signature|' r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' r'rsa-encrypt)$', word): yield match.start(), Name.Builtin, word elif re.match( r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' r'minimum|maximum|negate|complement|absolute|random|head|tail|' r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' r'copy)$', word): yield match.start(), Name.Function, word elif re.match( r'(error|source|input|license|help|install|echo|Usage|with|func|' r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' r'remold|charset|array|replace|move|extract|forskip|forall|alter|' r'first+|also|take|for|forever|dispatch|attempt|what-dir|' r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' r'write-user|save-user|set-user-name|protect-system|parse-xml|' r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' r'request-dir|center-face|do-events|net-error|decode-url|' r'parse-header|parse-header-date|parse-email-addrs|import-email|' r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' r'find-key-face|do-face|viewtop|confine|find-window|' r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' r'read-thru|load-thru|do-thru|launch-thru|load-image|' r'request-download|do-face-alt|set-font|set-para|get-style|' r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' r'resize-face|load-stock|load-stock-block|notify|request|flash|' r'request-color|request-pass|request-text|request-list|' r'request-date|request-file|dbug|editor|link-relative-path|' r'emailer|parse-error)$', word): yield match.start(), Keyword.Namespace, word elif re.match( 
r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' r'return|exit|break)$', word): yield match.start(), Name.Exception, word elif re.match('REBOL$', word): yield match.start(), Generic.Heading, word elif re.match("to-.*", word): yield match.start(), Keyword, word elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', word): yield match.start(), Operator, word elif re.match(".*\?$", word): yield match.start(), Keyword, word elif re.match(".*\!$", word): yield match.start(), Keyword.Type, word elif re.match("'.*", word): yield match.start(), Name.Variable.Instance, word # lit-word elif re.match("#.*", word): yield match.start(), Name.Label, word # issue elif re.match("%.*", word): yield match.start(), Name.Decorator, word # file else: yield match.start(), Name.Variable, word tokens = { 'root': [ (r'\s+', Text), (r'#"', String.Char, 'char'), (r'#{[0-9a-fA-F]*}', Number.Hex), (r'2#{', Number.Hex, 'bin2'), (r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex), (r'"', String, 'string'), (r'{', String, 'string2'), (r';#+.*\n', Comment.Special), (r';\*+.*\n', Comment.Preproc), (r';.*\n', Comment), (r'%"', Name.Decorator, 'stringFile'), (r'%[^(\^{^")\s\[\]]+', Name.Decorator), (r'<[a-zA-Z0-9:._-]*>', Name.Tag), (r'<[^(<>\s")]+', Name.Tag, 'tag'), (r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?' r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple (r'\d+[xX]\d+', Keyword.Constant), # pair (r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float), (r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float), (r'[+-]?\d+(\'\d+)?', Number), (r'[\[\]\(\)]', Generic.Strong), (r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url (r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url (r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email (r'comment\s', Comment, 'comment'), (r'/[^(\^{^")\s/[\]]*', Name.Attribute), (r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), (r'([^(\^{^")\s]+)', Text), ], 'string': [ (r'[^(\^")]+', String), (escape_re, String.Escape), (r'[\(|\)]+', String), (r'\^.', String.Escape), (r'"', String, '#pop'), ], 'string2': [ (r'[^(\^{^})]+', String), (escape_re, String.Escape), (r'[\(|\)]+', String), (r'\^.', String.Escape), (r'{', String, '#push'), (r'}', String, '#pop'), ], 'stringFile': [ (r'[^(\^")]+', Name.Decorator), (escape_re, Name.Decorator), (r'\^.', Name.Decorator), (r'"', Name.Decorator, '#pop'), ], 'char': [ (escape_re + '"', String.Char, '#pop'), (r'\^."', String.Char, '#pop'), (r'."', String.Char, '#pop'), ], 'tag': [ (escape_re, Name.Tag), (r'"', Name.Tag, 'tagString'), (r'[^(<>\r\n")]+', Name.Tag), (r'>', Name.Tag, '#pop'), ], 'tagString': [ (r'[^(\^")]+', Name.Tag), (escape_re, Name.Tag), (r'[\(|\)]+', Name.Tag), (r'\^.', Name.Tag), (r'"', Name.Tag, '#pop'), ], 'tuple': [ (r'(\d+\.)+', Keyword.Constant), (r'\d+', Keyword.Constant, '#pop'), ], 'bin2': [ (r'\s+', Number.Hex), (r'([0-1]\s*){8}', Number.Hex), (r'}', Number.Hex, '#pop'), ], 'comment': [ (r'"', Comment, 'commentString1'), (r'{', Comment, 'commentString2'), (r'\[', Comment, 'commentBlock'), (r'[^(\s{\"\[]+', Comment, '#pop'), ], 'commentString1': [ (r'[^(\^")]+', Comment), (escape_re, Comment), (r'[\(|\)]+', Comment), (r'\^.', Comment), (r'"', Comment, '#pop'), ], 'commentString2': [ (r'[^(\^{^})]+', Comment), (escape_re, Comment), (r'[\(|\)]+', Comment), (r'\^.', Comment), (r'{', Comment, '#push'), (r'}', Comment, 
'#pop'), ], 'commentBlock': [ (r'\[',Comment, '#push'), (r'\]',Comment, '#pop'), (r'[^(\[\])]*', Comment), ], } class ABAPLexer(RegexLexer): """ Lexer for ABAP, SAP's integrated language. *New in Pygments 1.1.* """ name = 'ABAP' aliases = ['abap'] filenames = ['*.abap'] mimetypes = ['text/x-abap'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'common': [ (r'\s+', Text), (r'^\*.*$', Comment.Single), (r'\".*?\n', Comment.Single), ], 'variable-names': [ (r'<[\S_]+>', Name.Variable), (r'[\w][\w_~]*(?:(\[\])|->\*)?', Name.Variable), ], 'root': [ include('common'), #function calls (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', bygroups(Keyword, Text, Name.Function)), (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' r'TRANSACTION|TRANSFORMATION))\b', Keyword), (r'(FORM|PERFORM)(\s+)([\w_]+)', bygroups(Keyword, Text, Name.Function)), (r'(PERFORM)(\s+)(\()([\w_]+)(\))', bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )), (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', bygroups(Keyword, Text, Name.Function, Text, Keyword)), # method implementation (r'(METHOD)(\s+)([\w_~]+)', bygroups(Keyword, Text, Name.Function)), # method calls (r'(\s+)([\w_\-]+)([=\-]>)([\w_\-~]+)', bygroups(Text, Name.Variable, Operator, Name.Function)), # call methodnames returning style (r'(?<=(=|-)>)([\w_\-~]+)(?=\()', Name.Function), # keywords with dashes in them. # these need to be first, because for instance the -ID part # of MESSAGE-ID wouldn't get highlighted if MESSAGE was # first in the list of keywords. (r'(ADD-CORRESPONDING|AUTHORITY-CHECK|' r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|' r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|' r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|' r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|' r'INTERFACE-POOL|INVERTED-DATE|' r'LOAD-OF-PROGRAM|LOG-POINT|' r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|' r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|' r'OUTPUT-LENGTH|PRINT-CONTROL|' r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|' r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|' r'TYPE-POOL|TYPE-POOLS' r')\b', Keyword), # keyword kombinations (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|' r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|' r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|' r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|' r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|' r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|' r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|' r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|' r'RUN\s+TIME|TIME\s+(STAMP)?)?|' r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|' r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|' r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|' r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|' r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|' r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|' r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|' r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|' r'DATABASE|SHARED\s+(MEMORY|BUFFER))|' r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|' r'FREE\s(MEMORY|OBJECT)?|' r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|' r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|' r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|' r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|' r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|' r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|' r'SCREEN)|COMMENT|FUNCTION\s+KEY|' r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|' r'SKIP|ULINE)|' r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|' r'TO LIST-PROCESSING|TO 
TRANSACTION)' r'(ENDING|STARTING)\s+AT|' r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|' r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|' r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|' r'(BEGIN|END)\s+OF|' r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|' r'COMPARING(\s+ALL\s+FIELDS)?|' r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|' r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|' r'END-OF-(DEFINITION|PAGE|SELECTION)|' r'WITH\s+FRAME(\s+TITLE)|' # simple kombinations r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|' r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|' r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|' r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|' r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|' r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|' r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword), # single word keywords. (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|' r'ASSIGN(ING)?|AT(\s+FIRST)?|' r'BACK|BLOCK|BREAK-POINT|' r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|' r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|' r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|' r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|' r'DETAIL|DIRECTORY|DIVIDE|DO|' r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|' r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|' r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|' r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|' r'HIDE|' r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|' r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|' r'LENGTH|LINES|LOAD|LOCAL|' r'JOIN|' r'KEY|' r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|' r'NODES|' r'OBLIGATORY|OF|OFF|ON|OVERLAY|' r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|' r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|' r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|' r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|' r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|' r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|' r'ULINE|UNDER|UNPACK|UPDATE|USING|' r'VALUE|VALUES|VIA|' r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword), # builtins (r'(abs|acos|asin|atan|' r'boolc|boolx|bit_set|' r'char_off|charlen|ceil|cmax|cmin|condense|contains|' r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|' r'count|count_any_of|count_any_not_of|' r'dbmaxlen|distance|' r'escape|exp|' r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|' r'insert|' r'lines|log|log10|' r'match|matches|' r'nmax|nmin|numofchar|' r'repeat|replace|rescale|reverse|round|' r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|' r'substring|substring_after|substring_from|substring_before|substring_to|' r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|' r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)), (r'&[0-9]', Name), (r'[0-9]+', Number.Integer), # operators which look like variable names before # parsing variable names. (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|' r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|' r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator), include('variable-names'), # standard oparators after variable names, # because < and > are part of field symbols. 
(r'[?*<>=\-+]', Operator), (r"'(''|[^'])*'", String.Single), (r'[/;:()\[\],\.]', Punctuation) ], } class NewspeakLexer(RegexLexer): """ For `Newspeak <http://newspeaklanguage.org/>` syntax. """ name = 'Newspeak' filenames = ['*.ns2'] aliases = ['newspeak', ] mimetypes = ['text/x-newspeak'] tokens = { 'root' : [ (r'\b(Newsqueak2)\b',Keyword.Declaration), (r"'[^']*'",String), (r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)', bygroups(Keyword.Declaration,Text,Name.Class,Text)), (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', Keyword), (r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)', bygroups(Name.Function,Text,Name.Variable)), (r'([a-zA-Z0-9_]+)(\s*)(=)', bygroups(Name.Attribute,Text,Operator)), (r'<[a-zA-Z0-9_]+>', Comment.Special), include('expressionstat'), include('whitespace') ], 'expressionstat': [ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'\d+', Number.Integer), (r':\w+',Name.Variable), (r'(\w+)(::)', bygroups(Name.Variable, Operator)), (r'\w+:', Name.Function), (r'\w+', Name.Variable), (r'\(|\)', Punctuation), (r'\[|\]', Punctuation), (r'\{|\}', Punctuation), (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), (r'\.|;', Punctuation), include('whitespace'), include('literals'), ], 'literals': [ (r'\$.', String), (r"'[^']*'", String), (r"#'[^']*'", String.Symbol), (r"#\w+:?", String.Symbol), (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) ], 'whitespace' : [ (r'\s+', Text), (r'"[^"]*"', Comment) ] } class GherkinLexer(RegexLexer): """ For `Gherkin <http://cukes.info/>` syntax. *New in Pygments 1.2.* """ name = 'Gherkin' aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin'] filenames = ['*.feature'] mimetypes = ['text/x-gherkin'] feature_keywords_regexp = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функционалност|Функционал|Особина|Могућност|Özellik|Właściwość|Tính năng|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$' scenario_keywords_regexp = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарио|Сценарий структураси|Сценарий|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Основа|Концепт|Контекст|Założenia|Tình huống|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$' examples_regexp 
= ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$' step_keywords_regexp = ur'^(\s*)(하지만|조건|만일|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Унда |То |Онда |Но |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Агар |А |Și |És |anrhegedig a |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Yna |Ya know how |Ya gotta |Y |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Soit |Siis |Si |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Donat |Donada |Diyelim ki |Dengan |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |But y\'all |But |Biết |Bet |BUT |Atunci |And y\'all |And |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A )' tokens = { 'comments': [ (r'#.*$', Comment), ], 'multiline_descriptions' : [ (step_keywords_regexp, Keyword, "#pop"), include('comments'), (r"(\s|.)", Name.Constant), ], 'multiline_descriptions_on_stack' : [ (step_keywords_regexp, Keyword, "#pop:2"), include('comments'), (r"(\s|.)", Name.Constant), ], 'scenario_table_description': [ (r"\s+\|", Text, 'scenario_table_header'), include('comments'), (r"(\s|.)", Name.Constant), ], 'scenario_table_header': [ (r"\s+\|\s*$", Text, "#pop:2"), (r"(\s+\|\s*)(#.*)$", bygroups(Text, Comment), "#pop:2"), include('comments'), (r"\s+\|", Text), (r"[^\|]", Name.Variable), ], 'scenario_sections_on_stack': [ (scenario_keywords_regexp, bygroups(Text, Name.Class, Name.Class, Name.Constant), "multiline_descriptions_on_stack"), ], 'narrative': [ include('scenario_sections_on_stack'), (r"(\s|.)", Name.Builtin), ], 'table_vars': [ (r'(<[^>]*>)', bygroups(Name.Variable)), ], 'string': [ include('table_vars'), (r'(\s|.)', String), ], 'py_string': [ (r'"""', String, "#pop"), include('string'), ], 'double_string': [ (r'"', String, "#pop"), include('string'), ], 'single_string': [ (r"'", String, "#pop"), include('string'), ], 'root': [ (r'\n', Text), include('comments'), (r'"""', String, "py_string"), (r'"', String, "double_string"), (r"'", String, "single_string"), include('table_vars'), (r'@[^@\s]+', Name.Namespace), (step_keywords_regexp, bygroups(Text, Keyword)), (feature_keywords_regexp, bygroups(Name.Class, Name.Class, Name.Constant), 'narrative'), (scenario_keywords_regexp, bygroups(Text, Name.Class, Name.Class, Name.Constant), "multiline_descriptions"), (examples_regexp, bygroups(Text, Name.Class, Name.Class, Name.Constant), "scenario_table_description"), (r'(\s|.)', Text), ] } class AsymptoteLexer(RegexLexer): """ For `Asymptote <http://asymptote.sf.net/>`_ source code. 
*New in Pygments 1.2.* """ name = 'Asymptote' aliases = ['asy', 'asymptote'] filenames = ['*.asy'] mimetypes = ['text/x-asymptote'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' tokens = { 'whitespace': [ (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment), ], 'statements': [ # simple string (TeX friendly) (r'"(\\\\|\\"|[^"])*"', String), # C style string (with character escapes) (r"'", String, 'string'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\],.]', Punctuation), (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' r'return|break|continue|struct|typedef|new|access|import|' r'unravel|from|include|quote|static|public|private|restricted|' r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), # Since an asy-type-name can be also an asy-function-name, # in the following we test if the string " [a-zA-Z]" follows # the Keyword.Type. # Of course it is not perfect ! (r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' r'path3|pen|picture|point|position|projection|real|revolution|' r'scaleT|scientific|segment|side|slice|splitface|string|surface|' r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' r'transformation|tree|triangle|trilinear|triple|vector|' r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type), # Now the asy-type-name which are not asy-function-name # except yours ! 
# Perhaps useless (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' r'picture|position|real|revolution|slice|splitface|ticksgridT|' r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), ], 'root': [ include('whitespace'), # functions (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')({)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation), 'function'), # function declarations (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')(;)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation)), ('', Text, 'statement'), ], 'statement' : [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', Punctuation, '#pop'), ], 'function': [ include('whitespace'), include('statements'), (';', Punctuation), ('{', Punctuation, '#push'), ('}', Punctuation, '#pop'), ], 'string': [ (r"'", String, '#pop'), (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'\n', String), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), (r'\\n', String), # line continuation (r'\\', String), # stray backslash ] } def get_tokens_unprocessed(self, text): from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name and value in ASYFUNCNAME: token = Name.Function elif token is Name and value in ASYVARNAME: token = Name.Variable yield index, token, value
tools/yuidoc/bin/pygments/lexers/other.py
107,772
en
0.769783
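The lexers in other.py above all follow the same Pygments recipe: a class-level `tokens` dict of regex states, plus callback functions such as `word_callback` when a single regex match has to be re-classified into different token types. Below is a minimal, self-contained sketch of that recipe; it assumes a current Pygments install, and the toy `TinyLexer` language with its three keywords is illustrative only, not part of the file above.

# Sketch: the RegexLexer + callback pattern used by the REBOL/ABAP/Gherkin
# lexers above, reduced to a toy language. Only the callback signature and
# the `tokens` state-machine layout are the point here.
from pygments.lexer import RegexLexer
from pygments.token import Comment, Keyword, Name, Whitespace


def word_callback(lexer, match):
    # Re-classify a bare word after the regex has matched it, the same way
    # word_callback does for REBOL words in the lexer above.
    word = match.group(0)
    token = Keyword if word in ('print', 'if', 'either') else Name.Variable
    yield match.start(), token, word


class TinyLexer(RegexLexer):
    name = 'Tiny'          # hypothetical toy language, not a real lexer
    aliases = ['tiny']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r';.*?$', Comment.Single),          # REBOL-style ; comments
            (r'[A-Za-z][\w-]*', word_callback),  # words go through the callback
        ],
    }


if __name__ == '__main__':
    for pos, tok, val in TinyLexer().get_tokens_unprocessed('print x ; note'):
        print(pos, tok, val)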
# # Copyright 2014 Google Inc. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # """Tests for client module.""" import responses import time import googlemaps from googlemaps import client as _client import test as _test import requests class ClientTest(_test.TestCase): def test_no_api_key(self): with self.assertRaises(Exception): client = googlemaps.Client() client.directions("Sydney", "Melbourne") def test_invalid_api_key(self): with self.assertRaises(Exception): client = googlemaps.Client(key="Invalid key.") client.directions("Sydney", "Melbourne") def test_urlencode(self): # See GH #72. encoded_params = _client.urlencode_params([("address", "=Sydney ~")]) self.assertEqual("address=%3DSydney+~", encoded_params) @responses.activate def test_queries_per_second(self): # This test assumes that the time to run a mocked query is # relatively small, eg a few milliseconds. We define a rate of # 3 queries per second, and run double that, which should take at # least 1 second but no more than 2. queries_per_second = 3 query_range = range(queries_per_second * 2) for _ in query_range: responses.add(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", body='{"status":"OK","results":[]}', status=200, content_type="application/json") client = googlemaps.Client(key="AIzaasdf", queries_per_second=queries_per_second) start = time.time() for _ in query_range: client.geocode("Sesame St.") end = time.time() self.assertTrue(start + 1 < end < start + 2) @responses.activate def test_key_sent(self): responses.add(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", body='{"status":"OK","results":[]}', status=200, content_type="application/json") client = googlemaps.Client(key="AIzaasdf") client.geocode("Sesame St.") self.assertEqual(1, len(responses.calls)) self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?" "key=AIzaasdf&address=Sesame+St.", responses.calls[0].request.url) @responses.activate def test_extra_params(self): responses.add(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", body='{"status":"OK","results":[]}', status=200, content_type="application/json") client = googlemaps.Client(key="AIzaasdf") client.geocode("Sesame St.", extra_params={"foo": "bar"}) self.assertEqual(1, len(responses.calls)) self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?" 
"key=AIzaasdf&address=Sesame+St.&foo=bar", responses.calls[0].request.url) def test_hmac(self): """ From http://en.wikipedia.org/wiki/Hash-based_message_authentication_code HMAC_SHA1("key", "The quick brown fox jumps over the lazy dog") = 0xde7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9 """ message = "The quick brown fox jumps over the lazy dog" key = "a2V5" # "key" -> base64 signature = "3nybhbi3iqa8ino29wqQcBydtNk=" self.assertEqual(signature, _client.sign_hmac(key, message)) @responses.activate def test_url_signed(self): responses.add(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", body='{"status":"OK","results":[]}', status=200, content_type="application/json") client = googlemaps.Client(client_id="foo", client_secret="a2V5") client.geocode("Sesame St.") self.assertEqual(1, len(responses.calls)) # Check ordering of parameters. self.assertIn("address=Sesame+St.&client=foo&signature", responses.calls[0].request.url) self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?" "address=Sesame+St.&client=foo&" "signature=fxbWUIcNPZSekVOhp2ul9LW5TpY=", responses.calls[0].request.url) @responses.activate def test_ua_sent(self): responses.add(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", body='{"status":"OK","results":[]}', status=200, content_type="application/json") client = googlemaps.Client(key="AIzaasdf") client.geocode("Sesame St.") self.assertEqual(1, len(responses.calls)) user_agent = responses.calls[0].request.headers["User-Agent"] self.assertTrue(user_agent.startswith("GoogleGeoApiClientPython")) @responses.activate def test_retry(self): class request_callback: def __init__(self): self.first_req = True def __call__(self, req): if self.first_req: self.first_req = False return (200, {}, '{"status":"OVER_QUERY_LIMIT"}') return (200, {}, '{"status":"OK","results":[]}') responses.add_callback(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", content_type='application/json', callback=request_callback()) client = googlemaps.Client(key="AIzaasdf") client.geocode("Sesame St.") self.assertEqual(2, len(responses.calls)) self.assertEqual(responses.calls[0].request.url, responses.calls[1].request.url) @responses.activate def test_transport_error(self): responses.add(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", status=404, content_type='application/json') client = googlemaps.Client(key="AIzaasdf") with self.assertRaises(googlemaps.exceptions.HTTPError) as e: client.geocode("Foo") self.assertEqual(e.exception.status_code, 404) @responses.activate def test_host_override(self): responses.add(responses.GET, "https://foo.com/bar", body='{"status":"OK","results":[]}', status=200, content_type="application/json") client = googlemaps.Client(key="AIzaasdf") client._get("/bar", {}, base_url="https://foo.com") self.assertEqual(1, len(responses.calls)) @responses.activate def test_custom_extract(self): def custom_extract(resp): return resp.json() responses.add(responses.GET, "https://maps.googleapis.com/bar", body='{"error":"errormessage"}', status=403, content_type="application/json") client = googlemaps.Client(key="AIzaasdf") b = client._get("/bar", {}, extract_body=custom_extract) self.assertEqual(1, len(responses.calls)) self.assertEqual("errormessage", b["error"]) @responses.activate def test_retry_intermittent(self): class request_callback: def __init__(self): self.first_req = True def __call__(self, req): if self.first_req: self.first_req = False return (500, {}, 'Internal Server Error.') return (200, {}, 
'{"status":"OK","results":[]}') responses.add_callback(responses.GET, "https://maps.googleapis.com/maps/api/geocode/json", content_type="application/json", callback=request_callback()) client = googlemaps.Client(key="AIzaasdf") client.geocode("Sesame St.") self.assertEqual(2, len(responses.calls)) def test_channel_without_client_id(self): with self.assertRaises(ValueError): client = googlemaps.Client(key="AIzaasdf", channel="mychannel") def test_invalid_channel(self): # Cf. limitations here: # https://developers.google.com/maps/premium/reports # /usage-reports#channels with self.assertRaises(ValueError): client = googlemaps.Client(client_id="foo", client_secret="a2V5", channel="auieauie$? ") def test_auth_url_with_channel(self): client = googlemaps.Client(key="AIzaasdf", client_id="foo", client_secret="a2V5", channel="MyChannel_1") # Check ordering of parameters + signature. auth_url = client._generate_auth_url("/test", {"param": "param"}, accepts_clientid=True) self.assertEqual(auth_url, "/test?param=param" "&channel=MyChannel_1" "&client=foo" "&signature=OH18GuQto_mEpxj99UimKskvo4k=") # Check if added to requests to API with accepts_clientid=False auth_url = client._generate_auth_url("/test", {"param": "param"}, accepts_clientid=False) self.assertEqual(auth_url, "/test?param=param&key=AIzaasdf") def test_requests_version(self): client_args_timeout = { "key": "AIzaasdf", "client_id": "foo", "client_secret": "a2V5", "channel": "MyChannel_1", "connect_timeout": 5, "read_timeout": 5 } client_args = client_args_timeout.copy() del client_args["connect_timeout"] del client_args["read_timeout"] requests.__version__ = '2.3.0' with self.assertRaises(NotImplementedError): googlemaps.Client(**client_args_timeout) googlemaps.Client(**client_args) requests.__version__ = '2.4.0' googlemaps.Client(**client_args_timeout) googlemaps.Client(**client_args) @responses.activate def test_no_retry_over_query_limit(self): responses.add(responses.GET, "https://maps.googleapis.com/foo", body='{"status":"OVER_QUERY_LIMIT"}', status=200, content_type="application/json") client = googlemaps.Client(key="AIzaasdf", retry_over_query_limit=False) with self.assertRaises(googlemaps.exceptions.ApiError): client._request("/foo", {}) self.assertEqual(1, len(responses.calls))
test/test_client.py
11,711
en
0.819432
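test_hmac and test_url_signed in the file above exercise the client's URL-signing helper. The standard-library sketch below reproduces the same computation (base64-decode the shared secret, HMAC-SHA1 the path-and-query, url-safe-base64 the digest); the helper name sign_url_params is mine rather than the library's, and the assertion simply reuses the Wikipedia test vector quoted in test_hmac.

import base64
import hashlib
import hmac


def sign_url_params(secret_b64, path_and_query):
    # Decode the base64 secret, HMAC-SHA1 the string to sign, re-encode the
    # digest. This mirrors what the test expects from _client.sign_hmac.
    key = base64.urlsafe_b64decode(secret_b64)
    digest = hmac.new(key, path_and_query.encode('utf-8'), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).decode('utf-8')


# Test vector from test_hmac above: key "key" (base64 "a2V5") over the
# "quick brown fox" message yields this signature.
assert sign_url_params("a2V5", "The quick brown fox jumps over the lazy dog") == \
    "3nybhbi3iqa8ino29wqQcBydtNk="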
# -*-coding:Utf-8 -*

# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
#   may be used to endorse or promote products derived from this software
#   without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""Package contenant le paramètre 'créer' de la commande 'banc'."""

from primaires.interpreteur.masque.parametre import Parametre
from primaires.interpreteur.editeur.presentation import Presentation


class PrmCreer(Parametre):

    """Commande 'banc créer'"""

    def __init__(self):
        """Constructeur du paramètre."""
        Parametre.__init__(self, "creer", "create")
        self.schema = "<cle>"
        self.aide_courte = "crée un banc de poisson"
        self.aide_longue = \
            "Cette commande permet de créer un nouveau banc de " \
            "poisson. Vous devez préciser en argument la clé identifiant " \
            "le banc."

    def interpreter(self, personnage, dic_masques):
        """Méthode d'interprétation de commande"""
        cle = dic_masques["cle"].cle
        if cle in importeur.peche.bancs:
            personnage << "|err|Ce banc existe déjà.|ff|"
            return

        banc = importeur.peche.creer_banc(cle)
        editeur = importeur.interpreteur.construire_editeur(
                "schooledit", personnage, banc)
        personnage.contextes.ajouter(editeur)
        editeur.actualiser()
src/secondaires/peche/commandes/banc/creer.py
2,751
en
0.816426
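For illustration only, here is a hypothetical sibling parameter ('banc liste') written in the same Parametre style as PrmCreer above. It leans solely on what that file already uses: Parametre.__init__ with a French/English name pair, the engine-provided global importeur (with importeur.peche.bancs, assumed here to behave like a dict keyed by clé, as the membership test in PrmCreer suggests), and the personnage << message operator. The class itself is not part of the project.

from primaires.interpreteur.masque.parametre import Parametre


class PrmListe(Parametre):

    """Commande 'banc liste' (hypothetical sketch, not in the project)."""

    def __init__(self):
        """Constructeur du paramètre."""
        Parametre.__init__(self, "liste", "list")
        self.aide_courte = "liste les bancs de poisson"
        self.aide_longue = \
            "Cette commande affiche la liste des clés des bancs " \
            "de poisson déjà créés."

    def interpreter(self, personnage, dic_masques):
        """Méthode d'interprétation de commande"""
        # `importeur` is the engine global already relied on by PrmCreer;
        # bancs is assumed to be a dict of clé -> banc.
        cles = sorted(importeur.peche.bancs.keys())
        if not cles:
            personnage << "|err|Aucun banc de poisson n'est défini.|ff|"
            return
        personnage << "Bancs existants : " + ", ".join(cles)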
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class MybankPaymentTradeFinancingOrderRefundModel(object): def __init__(self): self._amount = None self._biz_no = None self._currency_value = None self._ext_info = None self._order_no = None self._refund_type = None self._remark = None self._request_no = None self._request_time = None self._scene_type = None @property def amount(self): return self._amount @amount.setter def amount(self, value): self._amount = value @property def biz_no(self): return self._biz_no @biz_no.setter def biz_no(self, value): self._biz_no = value @property def currency_value(self): return self._currency_value @currency_value.setter def currency_value(self, value): self._currency_value = value @property def ext_info(self): return self._ext_info @ext_info.setter def ext_info(self, value): self._ext_info = value @property def order_no(self): return self._order_no @order_no.setter def order_no(self, value): self._order_no = value @property def refund_type(self): return self._refund_type @refund_type.setter def refund_type(self, value): self._refund_type = value @property def remark(self): return self._remark @remark.setter def remark(self, value): self._remark = value @property def request_no(self): return self._request_no @request_no.setter def request_no(self, value): self._request_no = value @property def request_time(self): return self._request_time @request_time.setter def request_time(self, value): self._request_time = value @property def scene_type(self): return self._scene_type @scene_type.setter def scene_type(self, value): self._scene_type = value def to_alipay_dict(self): params = dict() if self.amount: if hasattr(self.amount, 'to_alipay_dict'): params['amount'] = self.amount.to_alipay_dict() else: params['amount'] = self.amount if self.biz_no: if hasattr(self.biz_no, 'to_alipay_dict'): params['biz_no'] = self.biz_no.to_alipay_dict() else: params['biz_no'] = self.biz_no if self.currency_value: if hasattr(self.currency_value, 'to_alipay_dict'): params['currency_value'] = self.currency_value.to_alipay_dict() else: params['currency_value'] = self.currency_value if self.ext_info: if hasattr(self.ext_info, 'to_alipay_dict'): params['ext_info'] = self.ext_info.to_alipay_dict() else: params['ext_info'] = self.ext_info if self.order_no: if hasattr(self.order_no, 'to_alipay_dict'): params['order_no'] = self.order_no.to_alipay_dict() else: params['order_no'] = self.order_no if self.refund_type: if hasattr(self.refund_type, 'to_alipay_dict'): params['refund_type'] = self.refund_type.to_alipay_dict() else: params['refund_type'] = self.refund_type if self.remark: if hasattr(self.remark, 'to_alipay_dict'): params['remark'] = self.remark.to_alipay_dict() else: params['remark'] = self.remark if self.request_no: if hasattr(self.request_no, 'to_alipay_dict'): params['request_no'] = self.request_no.to_alipay_dict() else: params['request_no'] = self.request_no if self.request_time: if hasattr(self.request_time, 'to_alipay_dict'): params['request_time'] = self.request_time.to_alipay_dict() else: params['request_time'] = self.request_time if self.scene_type: if hasattr(self.scene_type, 'to_alipay_dict'): params['scene_type'] = self.scene_type.to_alipay_dict() else: params['scene_type'] = self.scene_type return params @staticmethod def from_alipay_dict(d): if not d: return None o = MybankPaymentTradeFinancingOrderRefundModel() if 'amount' in d: o.amount = d['amount'] if 'biz_no' in d: o.biz_no = d['biz_no'] if 
'currency_value' in d: o.currency_value = d['currency_value'] if 'ext_info' in d: o.ext_info = d['ext_info'] if 'order_no' in d: o.order_no = d['order_no'] if 'refund_type' in d: o.refund_type = d['refund_type'] if 'remark' in d: o.remark = d['remark'] if 'request_no' in d: o.request_no = d['request_no'] if 'request_time' in d: o.request_time = d['request_time'] if 'scene_type' in d: o.scene_type = d['scene_type'] return o
alipay/aop/api/domain/MybankPaymentTradeFinancingOrderRefundModel.py
5,299
en
0.34282
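A short usage sketch for the model above (assumed, not taken from the SDK documentation): populate a few of its properties, serialize with to_alipay_dict(), and rebuild the object through the from_alipay_dict() static method. All field values are placeholders.

from alipay.aop.api.domain.MybankPaymentTradeFinancingOrderRefundModel import (
    MybankPaymentTradeFinancingOrderRefundModel,
)

model = MybankPaymentTradeFinancingOrderRefundModel()
model.amount = "100.00"                 # placeholder values throughout
model.currency_value = "CNY"
model.order_no = "20180101ORDER0001"
model.request_no = "REQ-0001"
model.refund_type = "REFUND"

params = model.to_alipay_dict()         # plain dict, ready to be JSON-encoded
restored = MybankPaymentTradeFinancingOrderRefundModel.from_alipay_dict(params)
assert restored.order_no == model.order_no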
# GENERATED BY KOMAND SDK - DO NOT EDIT import komand import json class Input: RESOURCEGROUP = "resourceGroup" SUBSCRIPTIONID = "subscriptionId" class Output: VALUE = "value" class ListVmInput(komand.Input): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "resourceGroup": { "type": "string", "title": "Resource Group", "description": "The resource group that will contain the virtual machine", "order": 2 }, "subscriptionId": { "type": "string", "title": "Subscription ID", "description": "The identifier of your subscription", "order": 1 } }, "required": [ "subscriptionId", "resourceGroup" ] } """) def __init__(self): super(self.__class__, self).__init__(self.schema) class ListVmOutput(komand.Output): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "value": { "type": "array", "title": "Value", "description": "List items virtual machine in a resource group", "items": { "$ref": "#/definitions/value_vm" }, "order": 1 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } }, "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } }, "diagnosticsProfile": { "type": "object", "title": "diagnosticsProfile", "properties": { "bootDiagnostics": { "$ref": "#/definitions/bootDiagnostics", "title": "Boot Diagnostics", "description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status", "order": 1 } }, "definitions": { "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } } } }, "hardwareProfile": { "type": "object", "title": "hardwareProfile", "properties": { "vmSize": { "type": "string", "title": "VM Size", "description": "Specifies the size of the virtual machine", "order": 1 } } }, "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the 
resource identifier of a virtual machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "networkProfile": { "type": "object", "title": "networkProfile", "properties": { "networkInterfaces": { "type": "array", "title": "Network Interfaces", "description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine", "items": { "$ref": "#/definitions/availabilitySet" }, "order": 1 } }, "definitions": { "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": 
"Specifies the resource ID", "order": 1 } } } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "osProfile": { "type": "object", "title": "osProfile", "properties": { "adminPassword": { "type": "string", "title": "Admin Password", "description": "Specifies the password of the administrator account", "order": 1 }, "adminUsername": { "type": "string", "title": "Admin UserName", "description": "Specifies the name of the administrator account", "order": 2 }, "computerName": { "type": "string", "title": "Computer Name", "description": "Specifies the host os name of the virtual machine", "order": 3 }, "customData": { "type": "string", "title": "Custom Data", "description": "Specifies a base-64 encoded string of custom data", "order": 4 }, "linuxConfiguration": { "$ref": "#/definitions/linuxConfiguration", "title": "Linux Configuration", "description": "Specifies the linux operating system settings on the virtual machine", "order": 7 }, "secrets": { "type": "array", "title": "Secrets", "description": "Specifies set of certificates that should be installed onto the virtual machine", "items": { "type": "object" }, "order": 5 }, "windowsConfiguration": { "$ref": "#/definitions/windowsConfiguration", "title": "Windows Configuration", "description": "Specifies windows operating system settings on the virtual machine", "order": 6 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", 
"title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", 
"order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "properties": { "type": "object", "title": "properties", "properties": { "availabilitySet": { "$ref": "#/definitions/availabilitySet", "title": "Availability Set", "description": "The availability set that contains the virtual machine", "order": 1 }, "diagnosticsProfile": { "$ref": "#/definitions/diagnosticsProfile", "title": "Diagnostics Profile", "description": "Specifies the boot diagnostic settings state", "order": 2 }, "hardwareProfile": { 
"$ref": "#/definitions/hardwareProfile", "title": "Hardware Profile", "description": "Specifies the hardware settings for the virtual machine", "order": 3 }, "networkProfile": { "$ref": "#/definitions/networkProfile", "title": "Network Profile", "description": "Specifies the network interfaces of the virtual machine", "order": 4 }, "osProfile": { "$ref": "#/definitions/osProfile", "title": "OS Profile", "description": "Specifies the operating system settings for the virtual machine", "order": 5 }, "provisioningState": { "type": "string", "title": "Provisioning State", "description": "Specifies the provisioned state of the virtual machine", "order": 6 }, "storageProfile": { "$ref": "#/definitions/storageProfile", "title": "Storage Profile", "description": "Specifies the storage settings for the virtual machine disks", "order": 7 }, "vmId": { "type": "string", "title": "Virtual Machine ID", "description": "The vm unique id", "order": 8 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } }, "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } }, "diagnosticsProfile": { "type": "object", "title": "diagnosticsProfile", "properties": { "bootDiagnostics": { "$ref": "#/definitions/bootDiagnostics", "title": "Boot Diagnostics", "description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status", "order": 1 } }, "definitions": { "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } } } }, "hardwareProfile": { "type": "object", "title": "hardwareProfile", "properties": { "vmSize": { "type": "string", "title": "VM Size", "description": "Specifies the size of the virtual machine", "order": 1 } } }, "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual machine image in 
your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "networkProfile": { "type": "object", "title": "networkProfile", "properties": { "networkInterfaces": { "type": "array", "title": "Network Interfaces", "description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine", "items": { "$ref": "#/definitions/availabilitySet" }, "order": 1 } }, "definitions": { "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } } } }, 
"osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "osProfile": { "type": "object", "title": "osProfile", "properties": { "adminPassword": { "type": "string", "title": "Admin Password", "description": "Specifies the password of the administrator account", "order": 1 }, "adminUsername": { "type": "string", "title": "Admin UserName", "description": "Specifies the name of the administrator account", "order": 2 }, "computerName": { "type": "string", "title": "Computer Name", "description": "Specifies the host os name of the virtual machine", "order": 3 }, "customData": { "type": "string", "title": "Custom Data", "description": "Specifies a base-64 encoded string of custom data", "order": 4 }, "linuxConfiguration": { "$ref": "#/definitions/linuxConfiguration", "title": "Linux Configuration", "description": "Specifies the linux operating system settings on the virtual machine", "order": 7 }, "secrets": { "type": "array", "title": "Secrets", "description": "Specifies set of certificates that should be installed onto the virtual machine", "items": { "type": "object" }, "order": 5 }, "windowsConfiguration": { "$ref": "#/definitions/windowsConfiguration", "title": "Windows Configuration", "description": "Specifies windows operating system settings on the virtual machine", "order": 6 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies 
the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": 
"Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", 
"description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "storageProfile": { "type": "object", "title": "storageProfile", "properties": { "dataDisks": { "type": "array", "title": "Data Disks", "description": "Specifies the parameters that are used to add a data disk to a virtual machine", "items": { "type": "object" }, "order": 1 }, "imageReference": { "$ref": "#/definitions/imageReference", "title": "Image Reference", "description": "Specifies information about the image to use", "order": 2 }, "osDisk": { "$ref": "#/definitions/osDisk", "title": "OS Disk", "description": "Specifies information about the operating system disk used by the virtual machine", "order": 3 } }, "definitions": { "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } 
}, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { 
"type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "storageProfile": { "type": "object", "title": "storageProfile", "properties": { "dataDisks": { "type": "array", "title": "Data Disks", "description": "Specifies the parameters that are used to add a data disk to a virtual machine", "items": { "type": "object" }, "order": 1 }, "imageReference": { "$ref": "#/definitions/imageReference", "title": "Image Reference", "description": "Specifies information about the image to use", "order": 2 }, "osDisk": { "$ref": "#/definitions/osDisk", "title": "OS Disk", "description": "Specifies information about the operating system disk used by the virtual machine", "order": 3 } }, "definitions": { "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "managedDisk": { 
"type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "tags": { "type": "object", "title": "tags", "properties": { "tags": { "type": "object", "title": "Tags", "description": "Tags", "order": 1 } } }, "value_vm": { "type": "object", "title": "value_vm", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the identifying url of the virtual machine", "order": 1 }, "location": { "type": "string", "title": "Location", "description": "Specifies the supported Azure location where the virtual machine should be created", "order": 2 }, "name": { "type": "string", "title": "Name Virtual Machine", "description": "The name of the virtual machine", "order": 3 }, "properties": { "$ref": "#/definitions/properties", "title": "Properties", "description": "Specifies the properties of the virtual machine", "order": 4 }, "tags": { "$ref": "#/definitions/tags", "title": "Tags", "description": "Specifies the tags that are assigned to the virtual machine", "order": 6 }, "type": { "type": "string", "title": "Type", "description": "Specifies the type of compute resource", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the 
unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } }, "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } }, "diagnosticsProfile": { "type": "object", "title": "diagnosticsProfile", "properties": { "bootDiagnostics": { "$ref": "#/definitions/bootDiagnostics", "title": "Boot Diagnostics", "description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status", "order": 1 } }, "definitions": { "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } } } }, "hardwareProfile": { "type": "object", "title": "hardwareProfile", "properties": { "vmSize": { "type": "string", "title": "VM Size", "description": "Specifies the size of the virtual machine", "order": 1 } } }, "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH 
public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "networkProfile": { "type": "object", "title": "networkProfile", "properties": { "networkInterfaces": { "type": "array", "title": "Network Interfaces", "description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine", "items": { "$ref": "#/definitions/availabilitySet" }, "order": 1 } }, "definitions": { "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage 
account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "osProfile": { "type": "object", "title": "osProfile", "properties": { "adminPassword": { "type": "string", "title": "Admin Password", "description": "Specifies the password of the administrator account", "order": 1 }, "adminUsername": { "type": "string", "title": "Admin UserName", "description": "Specifies the name of the administrator account", "order": 2 }, "computerName": { "type": "string", "title": "Computer Name", "description": "Specifies the host os name of the virtual machine", "order": 3 }, "customData": { "type": "string", "title": "Custom Data", "description": "Specifies a base-64 encoded string of custom data", "order": 4 }, "linuxConfiguration": { "$ref": "#/definitions/linuxConfiguration", "title": "Linux Configuration", "description": "Specifies the linux operating system settings on the virtual machine", "order": 7 }, "secrets": { "type": "array", "title": "Secrets", "description": "Specifies set of certificates that should be installed onto the virtual machine", "items": { "type": "object" }, "order": 5 }, "windowsConfiguration": { "$ref": "#/definitions/windowsConfiguration", "title": "Windows Configuration", "description": "Specifies windows operating system settings on the virtual machine", "order": 6 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", 
"title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { 
"component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "properties": { "type": "object", "title": "properties", "properties": { "availabilitySet": { "$ref": "#/definitions/availabilitySet", "title": "Availability Set", "description": "The availability set that contains the virtual machine", "order": 1 }, "diagnosticsProfile": { "$ref": "#/definitions/diagnosticsProfile", "title": "Diagnostics Profile", "description": "Specifies the boot diagnostic settings state", "order": 2 }, "hardwareProfile": { "$ref": "#/definitions/hardwareProfile", "title": "Hardware Profile", "description": "Specifies the hardware settings for the virtual machine", "order": 3 }, "networkProfile": { "$ref": "#/definitions/networkProfile", "title": "Network Profile", "description": "Specifies the network interfaces of the virtual machine", "order": 4 }, "osProfile": { "$ref": "#/definitions/osProfile", "title": "OS Profile", "description": "Specifies the operating system settings for the virtual machine", "order": 5 }, "provisioningState": { "type": "string", "title": "Provisioning State", "description": "Specifies the provisioned state of the virtual machine", "order": 6 }, "storageProfile": { "$ref": "#/definitions/storageProfile", "title": "Storage Profile", "description": "Specifies the storage settings for the virtual machine disks", "order": 7 }, "vmId": { "type": "string", "title": "Virtual Machine ID", "description": "The vm unique id", "order": 8 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and 
component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } }, "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } }, "diagnosticsProfile": { "type": "object", "title": "diagnosticsProfile", "properties": { "bootDiagnostics": { "$ref": "#/definitions/bootDiagnostics", "title": "Boot Diagnostics", "description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status", "order": 1 } }, "definitions": { "bootDiagnostics": { "type": "object", "title": "bootDiagnostics", "properties": { "enabled": { "type": "boolean", "title": "Enabled", "description": "Specifies if the boot diagnostics is enabled", "order": 1 }, "storageUri": { "type": "string", "title": "Storage Uri", "description": "Uri of the storage account to use for placing the console output and screenshot", "order": 2 } } } } }, "hardwareProfile": { "type": "object", "title": "hardwareProfile", "properties": { "vmSize": { "type": "string", "title": "VM Size", "description": "Specifies the size of the virtual machine", "order": 1 } } }, "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate 
with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "networkProfile": { "type": "object", "title": "networkProfile", "properties": { "networkInterfaces": { "type": "array", "title": "Network Interfaces", "description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine", "items": { "$ref": "#/definitions/availabilitySet" }, "order": 1 } }, "definitions": { "availabilitySet": { "type": "object", "title": "availabilitySet", "properties": { "id": { "type": "string", "title": "ID", "description": "Specifies the resource ID", "order": 1 } } } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 
} } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "osProfile": { "type": "object", "title": "osProfile", "properties": { "adminPassword": { "type": "string", "title": "Admin Password", "description": "Specifies the password of the administrator account", "order": 1 }, "adminUsername": { "type": "string", "title": "Admin UserName", "description": "Specifies the name of the administrator account", "order": 2 }, "computerName": { "type": "string", "title": "Computer Name", "description": "Specifies the host os name of the virtual machine", "order": 3 }, "customData": { "type": "string", "title": "Custom Data", "description": "Specifies a base-64 encoded string of custom data", "order": 4 }, "linuxConfiguration": { "$ref": "#/definitions/linuxConfiguration", "title": "Linux Configuration", "description": "Specifies the linux operating system settings on the virtual machine", "order": 7 }, "secrets": { "type": "array", "title": "Secrets", "description": "Specifies set of certificates that should be installed onto the virtual machine", "items": { "type": "object" }, "order": 5 }, "windowsConfiguration": { "$ref": "#/definitions/windowsConfiguration", "title": "Windows Configuration", "description": "Specifies windows operating system settings on the virtual machine", "order": 6 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "linuxConfiguration": { "type": "object", "title": "linuxConfiguration", "properties": { "disablePasswordAuthentication": { "type": "boolean", "title": "Disable Password Authentication", "description": "Specifies whether password authentication should be disabled", "order": 1 }, "ssh": { "$ref": "#/definitions/ssh", "title": "SSH", "description": "Specifies a collection of keys to be placed on the virtual machine", "order": 2 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public 
key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": 
"Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "storageProfile": { "type": "object", "title": "storageProfile", "properties": { "dataDisks": { "type": "array", "title": "Data Disks", "description": "Specifies the parameters that are used to add a data disk to a virtual machine", "items": { "type": "object" }, "order": 1 }, "imageReference": { "$ref": "#/definitions/imageReference", "title": "Image Reference", "description": "Specifies information about the image to use", "order": 2 }, "osDisk": { "$ref": "#/definitions/osDisk", "title": "OS Disk", "description": "Specifies information about the operating system disk used by the virtual machine", "order": 3 } }, "definitions": { "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual 
machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": 
"Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } }, "ssh": { "type": "object", "title": "ssh", "properties": { "publicKeys": { "type": "array", "title": "Public Keys", "description": "Specifies a 
collection of keys to be placed on the virtual machine", "items": { "$ref": "#/definitions/publicKeys" }, "order": 1 } }, "definitions": { "publicKeys": { "type": "object", "title": "publicKeys", "properties": { "keyData": { "type": "string", "title": "Key Data", "description": "SSH public key certificate used to authenticate with the vm through ssh", "order": 1 }, "path": { "type": "string", "title": "Path", "description": "Specifies the full path on the created VM where ssh public key is stored", "order": 2 } } } } }, "storageProfile": { "type": "object", "title": "storageProfile", "properties": { "dataDisks": { "type": "array", "title": "Data Disks", "description": "Specifies the parameters that are used to add a data disk to a virtual machine", "items": { "type": "object" }, "order": 1 }, "imageReference": { "$ref": "#/definitions/imageReference", "title": "Image Reference", "description": "Specifies information about the image to use", "order": 2 }, "osDisk": { "$ref": "#/definitions/osDisk", "title": "OS Disk", "description": "Specifies information about the operating system disk used by the virtual machine", "order": 3 } }, "definitions": { "imageReference": { "type": "object", "title": "imageReference", "properties": { "id": { "type": "string", "title": "Image Reference", "description": "Specifies the resource identifier of a virtual machine image in your subscription", "order": 1 }, "offer": { "type": "string", "title": "Offer", "description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine", "order": 2 }, "publisher": { "type": "string", "title": "Publisher", "description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine", "order": 3 }, "sku": { "type": "string", "title": "SKU", "description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine", "order": 4 }, "version": { "type": "string", "title": "Version", "description": "Specifies the version of the platform image or marketplace image used to create the virtual machine", "order": 5 } } }, "managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "osDisk": { "type": "object", "title": "osDisk", "properties": { "caching": { "type": "string", "title": "Caching", "description": "Specifies the caching requirements", "order": 1 }, "createOption": { "type": "string", "title": "Create Option", "description": "Specifies how the virtual machine should be created", "order": 2 }, "managedDisk": { "$ref": "#/definitions/managedDisk", "title": "Managed Disk", "description": "Specified the identifier and optional storage account type for the disk", "order": 3 }, "name": { "type": "string", "title": "Name", "description": "Specifies the disk name", "order": 4 }, "osType": { "type": "string", "title": "OS Type", "description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd", "order": 5 }, "vhd": { "$ref": "#/definitions/vhd", "title": "VHD", "description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed", "order": 6 } }, "definitions": { 
"managedDisk": { "type": "object", "title": "managedDisk", "properties": { "Id": { "type": "string", "title": "ID", "description": "Specifies the resource identifier of the managed disk", "order": 1 }, "storageAccountType": { "type": "string", "title": "Storage Account Type", "description": "Specifies the storage account type for the managed disk", "order": 2 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } } } }, "tags": { "type": "object", "title": "tags", "properties": { "tags": { "type": "object", "title": "Tags", "description": "Tags", "order": 1 } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", "title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and 
autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } }, "vhd": { "type": "object", "title": "vhd", "properties": { "uri": { "type": "string", "title": "VHD", "description": "Specifies the vhd uri", "order": 1 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } }, "windowsConfiguration": { "type": "object", "title": "windowsConfiguration", "properties": { "additionalUnattendContent": { "$ref": "#/definitions/additionalUnattendContent", "title": "Additional Unattend Content", "description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup", "order": 1 }, "enableAutomaticUpdates": { "type": "boolean", "title": "Enable Automatic Updates", "description": "Indicates whether virtual machine is enabled for automatic updates", "order": 2 }, "provisionVMAgent": { "type": "boolean", "title": "Provision VM Agent", "description": "Indicates whether virtual machine agent should be provisioned on the virtual machine", "order": 3 }, "winRM": { "$ref": "#/definitions/winRM", "title": "Win RM", "description": "Specifies the windows remote management listeners, this enables remote windows powershell", "order": 4 }, "winrRMListener": { "$ref": "#/definitions/listeners", "title": "WinrRM Listener", "description": "Contains configuration settings for the windows remote management service on the virtual machine", "order": 5 } }, "definitions": { "additionalUnattendContent": { "type": "object", "title": "additionalUnattendContent", "properties": { "component": { "type": "string", "title": "Component", "description": "Specifies the name of the component to configure with the added content", "order": 1 }, "content": { "type": "string", "title": "Content", "description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component", "order": 2 }, "pass": { "type": "string", "title": "Pass", "description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem", "order": 3 }, "settingName": { "type": "string", 
"title": "Setting Name", "description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon", "order": 4 } } }, "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } }, "winRM": { "type": "object", "title": "winRM", "properties": { "listeners": { "type": "array", "title": "Listeners", "items": { "$ref": "#/definitions/listeners" }, "order": 1 } }, "definitions": { "listeners": { "type": "object", "title": "listeners", "properties": { "certificateUrl": { "type": "string", "title": "Certificate Url", "description": "Specifies url of the certificate with which new virtual machines is provisioned", "order": 1 }, "protocol": { "type": "string", "title": "Protocol", "description": "Specifies the protocol of listener", "order": 2 } } } } } } } } } """) def __init__(self): super(self.__class__, self).__init__(self.schema)
azure_compute/komand_azure_compute/actions/list_vm/schema.py
176,147
GENERATED BY KOMAND SDK - DO NOT EDIT
37
en
0.775658
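The generated schema above is ordinary JSON Schema with nested definitions and $ref pointers. A minimal sketch of validating an osProfile-like payload with the jsonschema package; the trimmed-down subset schema, the payload, and the variable names here are illustrative stand-ins, not the full generated document from schema.py:

# Minimal sketch: validate an osProfile-style payload against a trimmed subset
# of the schema above. Assumes the `jsonschema` package is installed; a real
# integration would load the full generated schema from schema.py instead.
from jsonschema import validate, ValidationError

os_profile_schema = {
    "type": "object",
    "properties": {
        "adminUsername": {"type": "string"},
        "computerName": {"type": "string"},
        "linuxConfiguration": {
            "type": "object",
            "properties": {
                "disablePasswordAuthentication": {"type": "boolean"},
            },
        },
    },
}

payload = {
    "adminUsername": "azureuser",
    "computerName": "vm-01",
    "linuxConfiguration": {"disablePasswordAuthentication": True},
}

try:
    validate(instance=payload, schema=os_profile_schema)
    print("osProfile payload is valid")
except ValidationError as err:
    print("invalid payload:", err.message)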
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'potato.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
manage.py
662
Run administrative tasks. Django's command-line utility for administrative tasks. !/usr/bin/env python
103
en
0.725633
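manage.py above does nothing more than point DJANGO_SETTINGS_MODULE at potato.settings and hand argv to Django's command dispatcher. A minimal sketch, assuming the same potato.settings module, of bootstrapping Django the same way from a standalone script; the commented app and model names are hypothetical:

# Minimal sketch: bootstrap Django outside manage.py using the same settings
# module. django.setup() configures settings and the app registry so the ORM
# can be used from a plain script.
import os

import django

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'potato.settings')
django.setup()

# After setup(), models can be imported and queried, e.g.:
# from myapp.models import Thing   # hypothetical app and model
# print(Thing.objects.count())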
# coding: utf-8

import logging
import os
import shutil
import sys
import tempfile
import unittest

import pytest

import fiona

logging.basicConfig(stream=sys.stderr, level=logging.INFO)


class UnicodePathTest(unittest.TestCase):

    def setUp(self):
        tempdir = tempfile.mkdtemp()
        self.dir = os.path.join(tempdir, 'français')
        shutil.copytree('tests/data/', self.dir)

    def tearDown(self):
        shutil.rmtree(os.path.dirname(self.dir))

    def test_unicode_path(self):
        path = self.dir + '/coutwildrnp.shp'
        if sys.version_info < (3,):
            path = path.decode('utf-8')
        with fiona.open(path) as c:
            assert len(c) == 67

    def test_unicode_path_layer(self):
        path = self.dir
        layer = 'coutwildrnp'
        if sys.version_info < (3,):
            path = path.decode('utf-8')
            layer = layer.decode('utf-8')
        with fiona.open(path, layer=layer) as c:
            assert len(c) == 67

    def test_utf8_path(self):
        path = self.dir + '/coutwildrnp.shp'
        if sys.version_info < (3,):
            with fiona.open(path) as c:
                assert len(c) == 67


class UnicodeStringFieldTest(unittest.TestCase):

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    @pytest.mark.xfail(reason="OGR silently fails to convert strings")
    def test_write_mismatch(self):
        """TOFIX: OGR silently fails to convert strings"""
        # Details:
        #
        # If we tell OGR that we want a latin-1 encoded output file and
        # give it a feature with a unicode property that can't be converted
        # to latin-1, no error is raised and OGR just writes the utf-8
        # encoded bytes to the output file.
        #
        # This might be shapefile specific.
        #
        # Consequences: no error on write, but there will be an error
        # on reading the data and expecting latin-1.
        schema = {
            'geometry': 'Point',
            'properties': {'label': 'str', 'num': 'int'}}
        with fiona.open(os.path.join(self.tempdir, "test-write-fail.shp"),
                        'w', driver="ESRI Shapefile", schema=schema,
                        encoding='latin1') as c:
            c.writerecords([{
                'type': 'Feature',
                'geometry': {'type': 'Point', 'coordinates': [0, 0]},
                'properties': {'label': u'徐汇区', 'num': 0}}])
        with fiona.open(os.path.join(self.tempdir), encoding='latin1') as c:
            f = next(iter(c))
            # Next assert fails.
            self.assertEqual(f['properties']['label'], u'徐汇区')

    def test_write_utf8(self):
        schema = {
            'geometry': 'Point',
            'properties': {'label': 'str', u'verit\xe9': 'int'}}
        with fiona.open(os.path.join(self.tempdir, "test-write.shp"), "w",
                        "ESRI Shapefile", schema=schema,
                        encoding='utf-8') as c:
            c.writerecords([{
                'type': 'Feature',
                'geometry': {'type': 'Point', 'coordinates': [0, 0]},
                'properties': {'label': u'Ba\u2019kelalan', u'verit\xe9': 0}}])
        with fiona.open(os.path.join(self.tempdir), encoding='utf-8') as c:
            f = next(iter(c))
            self.assertEqual(f['properties']['label'], u'Ba\u2019kelalan')
            self.assertEqual(f['properties'][u'verit\xe9'], 0)

    def test_write_gb18030(self):
        """Can write a simplified Chinese shapefile"""
        schema = {
            'geometry': 'Point',
            'properties': {'label': 'str', 'num': 'int'}}
        with fiona.open(os.path.join(self.tempdir, "test-write-gb18030.shp"),
                        'w', driver="ESRI Shapefile", schema=schema,
                        encoding='gb18030') as c:
            c.writerecords([{
                'type': 'Feature',
                'geometry': {'type': 'Point', 'coordinates': [0, 0]},
                'properties': {'label': u'徐汇区', 'num': 0}}])
        with fiona.open(os.path.join(self.tempdir), encoding='gb18030') as c:
            f = next(iter(c))
            self.assertEqual(f['properties']['label'], u'徐汇区')
            self.assertEqual(f['properties']['num'], 0)
tests/test_unicode.py
4,392
Can write a simplified Chinese shapefile TOFIX: OGR silently fails to convert strings coding: utf-8 Details: If we tell OGR that we want a latin-1 encoded output file and give it a feature with a unicode property that can't be converted to latin-1, no error is raised and OGR just writes the utf-8 encoded bytes to the output file. This might be shapefile specific. Consequences: no error on write, but there will be an error on reading the data and expecting latin-1. Next assert fails.
489
en
0.893415
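The tests above exercise fiona's encoding handling on write and read. A minimal sketch of the same round trip outside a test harness, assuming fiona with the ESRI Shapefile driver and a writable temporary directory; the file name and label value are illustrative:

# Minimal sketch: write a UTF-8 encoded shapefile with fiona and read the
# record back, mirroring the round trip in the tests above.
import os
import tempfile

import fiona

schema = {'geometry': 'Point', 'properties': {'label': 'str'}}
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'roundtrip.shp')

with fiona.open(path, 'w', driver='ESRI Shapefile',
                schema=schema, encoding='utf-8') as dst:
    dst.writerecords([{
        'type': 'Feature',
        'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
        'properties': {'label': u'Ba\u2019kelalan'},
    }])

with fiona.open(path, encoding='utf-8') as src:
    first = next(iter(src))
    print(first['properties']['label'])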
from setuptools import setup, find_packages

__author__ = 'Giulio Rossetti'
__license__ = "BSD 2 Clause"
__email__ = "[email protected]"

# Get the long description from the README file
# with open(path.join(here, 'README.md'), encoding='utf-8') as f:
#     long_description = f.read()

setup(name='demon',
      version='2.0.4',
      license='BSD-2-Clause',
      description='Community Discovery algorithm',
      url='https://github.com/GiulioRossetti/DEMON',
      author='Giulio Rossetti',
      author_email='[email protected]',
      use_2to3=True,
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          'Development Status :: 5 - Production/Stable',

          # Indicate who your project is intended for
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',

          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: BSD License',
          "Operating System :: OS Independent",

          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3'
      ],
      keywords=['complex-networks', 'community discovery'],
      install_requires=['networkx', 'future', ''],
      packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test",
                                      "demon.test", "demon.test.*"]),
      )
setup.py
1,664
Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() How mature is this project? Common values are 3 - Alpha 4 - Beta 5 - Production/Stable Indicate who your project is intended for Pick your license as you wish (should match "license" above) Specify the Python versions you support here. In particular, ensure that you indicate whether you support Python 2, Python 3 or both.
470
en
0.822206
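This setup.py relies on find_packages with an exclude list to keep test packages out of the distribution. A minimal sketch of inspecting what that call actually discovers, assuming it is run from the project root of the DEMON checkout:

# Minimal sketch: list the packages setuptools would bundle, using the same
# exclude patterns as the setup.py above.
from setuptools import find_packages

packages = find_packages(
    exclude=["*.test", "*.test.*", "test.*", "test",
             "demon.test", "demon.test.*"])
print(packages)   # e.g. ['demon'] for a layout with demon/ and demon/test/

The exclude patterns only filter packages found under the current directory; they do not affect data files. Note also that recent setuptools releases have dropped 2to3 support, so the use_2to3 flag above would be rejected by a modern toolchain.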
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.

import logging
import unittest

from ml.rl.models.example_sequence_model import ExampleSequenceModel
from ml.rl.test.models.test_utils import check_save_load

logger = logging.getLogger(__name__)


class TestExampleSequenceModel(unittest.TestCase):
    def test_basic(self):
        state_dim = 8
        model = ExampleSequenceModel(state_dim)
        input = model.input_prototype()
        output = model(input)
        self.assertEqual((1, 1), output.value.shape)

    def test_save_load(self):
        state_dim = 8
        model = ExampleSequenceModel(state_dim)
        # ONNX sure exports a lot of parameters...
        expected_num_params, expected_num_inputs, expected_num_outputs = 133, 3, 1
        check_save_load(
            self, model, expected_num_params, expected_num_inputs, expected_num_outputs
        )
ml/rl/test/models/test_sequence_model.py
923
!/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. ONNX sure exports a lot of parameters...
132
en
0.824334
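check_save_load in the test above asserts that the exported ONNX graph has the expected numbers of parameters, inputs, and outputs; its internals are not shown here. A minimal sketch of counting those quantities directly with torch.onnx and the onnx package, using a small stand-in module rather than ExampleSequenceModel:

# Minimal sketch: export a small torch module to ONNX and count its graph
# initializers (parameters), inputs, and outputs. An assumed stand-in for
# what check_save_load verifies, not the ReAgent helper itself.
import io

import onnx
import torch
import torch.nn as nn

model = nn.Linear(8, 1)                # stand-in for ExampleSequenceModel
example_input = torch.randn(1, 8)

buffer = io.BytesIO()
torch.onnx.export(model, example_input, buffer)
exported = onnx.load_from_string(buffer.getvalue())

num_params = len(exported.graph.initializer)
num_inputs = len(exported.graph.input)
num_outputs = len(exported.graph.output)
print(num_params, num_inputs, num_outputs)

Exact counts depend on the exporter version (older exporters also list initializers among the graph inputs), which is presumably why the test pins hard-coded expectations.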
# -------------------------------------------------------- # Deformable Convolutional Networks # Copyright (c) 2017 Microsoft # Copyright (c) 2019 IBM Corp # Licensed under The Apache-2.0 License [see LICENSE for details] # Written by Haozhi Qi # -------------------------------------------------------- import cPickle import mxnet as mx from utils.symbol import Symbol from operator_py.pyramid_proposal import * from operator_py.proposal_target import * from operator_py.fpn_roi_pooling import * from operator_py.box_annotator_ohem import * class resnet_v1_101_fpn_dcn_rcnn(Symbol): def __init__(self): """ Use __init__ to define parameter network needs """ self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred'] self.shared_param_dict = {} for name in self.shared_param_list: self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight') self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias') def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5): conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True) bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps) scale_conv1 = bn_conv1 conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu') pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max') res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch1 = bn2a_branch1 res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2a = bn2a_branch2a res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu') res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2b = bn2a_branch2b res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu') res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2a_branch2c = bn2a_branch2c res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c]) res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu') res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2a = bn2b_branch2a 
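# --- editor's sketch (hypothetical helper, not part of the original file) ----
# Every residual unit in this backbone repeats the same 1x1 -> 3x3 -> 1x1
# Convolution/BatchNorm/ReLU bottleneck with an identity or projected
# shortcut. Using only the mx.symbol calls already present in this file, a
# helper like the following could generate each unit instead of spelling out
# every layer by hand:
def bottleneck_unit(data, shortcut, prefix, num_mid, num_out,
                    stride=(1, 1), eps=1e-5):
    """Build one ResNet bottleneck unit; `prefix` mimics the res*/bn* naming."""
    def conv_bn(x, suffix, num_filter, kernel, pad, stride):
        c = mx.symbol.Convolution(name='res' + prefix + suffix, data=x,
                                  num_filter=num_filter, pad=pad,
                                  kernel=kernel, stride=stride, no_bias=True)
        return mx.symbol.BatchNorm(name='bn' + prefix + suffix, data=c,
                                   use_global_stats=True, fix_gamma=False,
                                   eps=eps)
    b2a = mx.symbol.Activation(
        name='res' + prefix + '_branch2a_relu',
        data=conv_bn(data, '_branch2a', num_mid, (1, 1), (0, 0), stride),
        act_type='relu')
    b2b = mx.symbol.Activation(
        name='res' + prefix + '_branch2b_relu',
        data=conv_bn(b2a, '_branch2b', num_mid, (3, 3), (1, 1), (1, 1)),
        act_type='relu')
    b2c = conv_bn(b2b, '_branch2c', num_out, (1, 1), (0, 0), (1, 1))
    out = mx.symbol.broadcast_add(name='res' + prefix, *[shortcut, b2c])
    return mx.symbol.Activation(name='res' + prefix + '_relu', data=out,
                                act_type='relu')
# ------------------------------------------------------------------------------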
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu') res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2b = bn2b_branch2b res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu') res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2b_branch2c = bn2b_branch2c res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c]) res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu') res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2a = bn2c_branch2a res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu') res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2b = bn2c_branch2b res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu') res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale2c_branch2c = bn2c_branch2c res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c]) res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu') res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch1 = bn3a_branch1 res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2a = bn3a_branch2a res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu') res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2b = bn3a_branch2b res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu') res3a_branch2c = 
mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3a_branch2c = bn3a_branch2c res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c]) res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu') res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2a = bn3b1_branch2a res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu') res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2b = bn3b1_branch2b res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu') res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b1_branch2c = bn3b1_branch2c res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c]) res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu') res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2a = bn3b2_branch2a res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu') res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2b = bn3b2_branch2b res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu') res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b2_branch2c = bn3b2_branch2c res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c]) res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu') res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2a = bn3b3_branch2a 
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu') if with_dpyramid: res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2b = bn3b3_branch2b res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu') res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale3b3_branch2c = bn3b3_branch2c res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c]) res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu') res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch1 = bn4a_branch1 res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True) bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2a = bn4a_branch2a res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu') res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2b = bn4a_branch2b res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu') res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4a_branch2c = bn4a_branch2c res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c]) res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu') res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2a = bn4b1_branch2a res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, 
act_type='relu') res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2b = bn4b1_branch2b res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu') res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b1_branch2c = bn4b1_branch2c res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c]) res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu') res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2a = bn4b2_branch2a res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu') res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2b = bn4b2_branch2b res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu') res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b2_branch2c = bn4b2_branch2c res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c]) res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu') res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2a = bn4b3_branch2a res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu') res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2b = bn4b3_branch2b res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu') res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b3_branch2c = bn4b3_branch2c res4b3 = 
mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c]) res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu') res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2a = bn4b4_branch2a res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu') res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2b = bn4b4_branch2b res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu') res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b4_branch2c = bn4b4_branch2c res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c]) res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu') res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2a = bn4b5_branch2a res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu') res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2b = bn4b5_branch2b res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu') res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b5_branch2c = bn4b5_branch2c res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c]) res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu') res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2a = bn4b6_branch2a res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu') res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b6_branch2b = 
mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2b = bn4b6_branch2b res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu') res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b6_branch2c = bn4b6_branch2c res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c]) res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu') res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2a = bn4b7_branch2a res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu') res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2b = bn4b7_branch2b res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu') res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b7_branch2c = bn4b7_branch2c res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c]) res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu') res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2a = bn4b8_branch2a res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu') res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2b = bn4b8_branch2b res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu') res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b8_branch2c = bn4b8_branch2c res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c]) res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu') res4b9_branch2a = 
mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2a = bn4b9_branch2a res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu') res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2b = bn4b9_branch2b res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu') res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b9_branch2c = bn4b9_branch2c res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c]) res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu') res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2a = bn4b10_branch2a res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu') res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2b = bn4b10_branch2b res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu') res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b10_branch2c = bn4b10_branch2c res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c]) res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu') res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2a = bn4b11_branch2a res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu') res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2b = bn4b11_branch2b 
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu') res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b11_branch2c = bn4b11_branch2c res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c]) res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu') res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2a = bn4b12_branch2a res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu') res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2b = bn4b12_branch2b res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu') res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b12_branch2c = bn4b12_branch2c res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c]) res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu') res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2a = bn4b13_branch2a res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu') res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2b = bn4b13_branch2b res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu') res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b13_branch2c = bn4b13_branch2c res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c]) res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu') res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, 
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2a = bn4b14_branch2a res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu') res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2b = bn4b14_branch2b res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu') res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b14_branch2c = bn4b14_branch2c res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c]) res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu') res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2a = bn4b15_branch2a res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu') res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2b = bn4b15_branch2b res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu') res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b15_branch2c = bn4b15_branch2c res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c]) res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu') res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2a = bn4b16_branch2a res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu') res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2b = bn4b16_branch2b res4b16_branch2b_relu = 
mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu') res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b16_branch2c = bn4b16_branch2c res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c]) res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu') res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2a = bn4b17_branch2a res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu') res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2b = bn4b17_branch2b res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu') res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b17_branch2c = bn4b17_branch2c res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c]) res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu') res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2a = bn4b18_branch2a res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu') res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2b = bn4b18_branch2b res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu') res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b18_branch2c = bn4b18_branch2c res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c]) res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu') res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 
1), stride=(1, 1), no_bias=True) bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2a = bn4b19_branch2a res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu') res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2b = bn4b19_branch2b res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu') res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b19_branch2c = bn4b19_branch2c res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c]) res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu') res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2a = bn4b20_branch2a res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu') res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2b = bn4b20_branch2b res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu') res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b20_branch2c = bn4b20_branch2c res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c]) res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu') res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2a = bn4b21_branch2a res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu') res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2b = bn4b21_branch2b res4b21_branch2b_relu = 
mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu') res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b21_branch2c = bn4b21_branch2c res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c]) res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu') res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2a = bn4b22_branch2a res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu') if with_dpyramid: res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True) else: res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True) bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2b = bn4b22_branch2b res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu') res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale4b22_branch2c = bn4b22_branch2c res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c]) res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu') if with_dilated: res5_stride = (1, 1) res5_dilate = (2, 2) else: res5_stride = (2, 2) res5_dilate = (1, 1) # res5a-bottleneck res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2a = bn5a_branch2a res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu') if with_dconv: res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True) else: res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, 
pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2b = bn5a_branch2b res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu') res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch2c = bn5a_branch2c # res5a-shortcut res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True) bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps) scale5a_branch1 = bn5a_branch1 res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c]) res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu') # res5b-bottleneck res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2a = bn5b_branch2a res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu') if with_dconv: res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True) else: res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2b = bn5b_branch2b res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu') res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5b_branch2c = bn5b_branch2c # res5b-shortcut res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c]) res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu') # res5c-bottleneck res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2a = bn5c_branch2a res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu') if with_dconv: res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', 
data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate) res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True) else: res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True) bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2b = bn5c_branch2b res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu') res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True) bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps) scale5c_branch2c = bn5c_branch2c # res5c-shortcut res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c]) res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu') return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256): # lateral connection fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1') fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1') fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1') fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1') # top-down connection fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample') fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum') fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample') fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum') fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample') fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum') # FPN feature fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6') fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5') fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4') fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3') fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2') return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 def get_rpn_subnet(self, data, num_anchors, suffix): rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix, weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias']) rpn_relu = 
mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix) rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix, weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias']) rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix, weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias']) # n x (2*A) x H x W => n x 2 x (A*H*W) rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix) rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix) rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix) rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix) rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix) return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7): offset = mx.contrib.sym.DeformablePSROIPooling(name='offset_' + name + '_t', data=data, rois=rois, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale) offset = mx.sym.FullyConnected(name='offset_' + name, data=offset, num_hidden=part_size * part_size * 2, lr_mult=0.01, weight=self.shared_param_dict['offset_' + param_name + '_weight'], bias=self.shared_param_dict['offset_' + param_name + '_bias']) offset_reshape = mx.sym.Reshape(data=offset, shape=(-1, 2, part_size, part_size), name='offset_reshape_' + name) output = mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool_' + name, data=data, rois=rois, trans=offset_reshape, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1) return output def get_symbol(self, cfg, is_train=True): # config alias for convenient num_classes = cfg.dataset.NUM_CLASSES num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes) data = mx.sym.Variable(name="data") im_info = mx.sym.Variable(name="im_info") # shared convolutional layers res2, res3, res4, res5 = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True) fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5) rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2') rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3') rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4') rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5') rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6') rpn_cls_prob_dict = { 'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': 
rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2, } rpn_bbox_pred_dict = { 'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2, } arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()) if is_train: rpn_label = mx.sym.Variable(name='label') rpn_bbox_target = mx.sym.Variable(name='bbox_target') rpn_bbox_weight = mx.sym.Variable(name='bbox_weight') gt_boxes = mx.sym.Variable(name="gt_boxes") rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2) rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2) # RPN classification loss rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=-1, name='rpn_cls_prob') # bounding box regression rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)) rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE) aux_dict = { 'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE } # ROI proposal rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items())) # ROI proposal target gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape') rois, label, bbox_target, bbox_weight \ = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION) else: aux_dict = { 'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE } # ROI proposal rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items())) offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01) offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01) offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01) offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01) offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01) offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01) offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01) offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01) roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, 
offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True) # 2 fc fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024) fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu') fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024) fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu') # cls_score/bbox_pred cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes) bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4) if is_train: if cfg.TRAIN.ENABLE_OHEM: labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight) cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1) bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM) rcnn_label = labels_ohem else: cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid') bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)) bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS) rcnn_label = label # reshape output rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape') cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape') group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)]) else: cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score) cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape') bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape') group = mx.sym.Group([rois, cls_prob, bbox_pred]) self.sym = group return group def init_weight_rcnn(self, cfg, arg_params, aux_params): arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight']) arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias']) arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight']) arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias']) arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight']) arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias']) arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight']) 
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias']) def init_deformable_convnet(self, cfg, arg_params, aux_params): arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight']) arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias']) arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight']) arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias']) arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight']) arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias']) arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight']) arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias']) arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight']) arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias']) def init_weight_fpn(self, cfg, arg_params, aux_params): arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight']) arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias']) arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight']) arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias']) arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight']) arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias']) arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight']) arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias']) arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight']) arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias']) arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight']) arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias']) arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight']) arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias']) arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight']) arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias']) arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight']) arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias']) def init_weight(self, cfg, arg_params, aux_params): # for name in self.shared_param_list: # if 'offset' in name: # arg_params[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight']) # else: # arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight']) # arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias']) # self.init_deformable_convnet(cfg, arg_params, aux_params) # self.init_weight_rcnn(cfg, 
arg_params, aux_params) # self.init_weight_fpn(cfg, arg_params, aux_params) arg_params2, aux_params2 = {}, {} for name in self.shared_param_list: if 'offset' in name: arg_params2[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight']) else: arg_params2[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight']) arg_params2[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias']) self.init_deformable_convnet(cfg, arg_params2, aux_params2) self.init_weight_rcnn(cfg, arg_params2, aux_params2) self.init_weight_fpn(cfg, arg_params2, aux_params2) for k in arg_params2: if (k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape): arg_params[k] = arg_params2[k] for k in aux_params2: if k not in aux_params: aux_params[k] = aux_params2[k]
Source file: fpn/symbols/resnet_v1_101_fpn_dcn_rcnn.py (87,595 bytes)
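The file above repeats one bottleneck pattern (1x1 conv -> BN -> ReLU -> 3x3 conv -> BN -> ReLU -> 1x1 conv -> BN -> residual add -> ReLU) for every res4bN unit. The sketch below is not part of the original file: it factors that pattern into a hypothetical helper, reusing only the mx.symbol calls already present in the source, so one res4b unit can be read at a glance.

import mxnet as mx


def bottleneck_unit(data, prefix, num_mid, num_out, eps=2e-5):
    """Identity-shortcut bottleneck as used by the res4bN units above.

    `bottleneck_unit` is an assumed helper name; `eps` defaults to a common
    BatchNorm epsilon here, whereas the original file passes its own constant.
    """
    # branch2a: 1x1 convolution reducing channels
    branch2a = mx.symbol.Convolution(name='res%s_branch2a' % prefix, data=data, num_filter=num_mid,
                                     pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
    bn2a = mx.symbol.BatchNorm(name='bn%s_branch2a' % prefix, data=branch2a,
                               use_global_stats=True, fix_gamma=False, eps=eps)
    relu2a = mx.symbol.Activation(name='res%s_branch2a_relu' % prefix, data=bn2a, act_type='relu')
    # branch2b: 3x3 convolution at the reduced width
    branch2b = mx.symbol.Convolution(name='res%s_branch2b' % prefix, data=relu2a, num_filter=num_mid,
                                     pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
    bn2b = mx.symbol.BatchNorm(name='bn%s_branch2b' % prefix, data=branch2b,
                               use_global_stats=True, fix_gamma=False, eps=eps)
    relu2b = mx.symbol.Activation(name='res%s_branch2b_relu' % prefix, data=bn2b, act_type='relu')
    # branch2c: 1x1 convolution expanding back to the output width
    branch2c = mx.symbol.Convolution(name='res%s_branch2c' % prefix, data=relu2b, num_filter=num_out,
                                     pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
    bn2c = mx.symbol.BatchNorm(name='bn%s_branch2c' % prefix, data=branch2c,
                               use_global_stats=True, fix_gamma=False, eps=eps)
    # residual addition with the block input, then ReLU
    out = mx.symbol.broadcast_add(name='res%s' % prefix, *[data, bn2c])
    return mx.symbol.Activation(name='res%s_relu' % prefix, data=out, act_type='relu')


# For example, res4b7_relu in the file above could equivalently be built as:
# res4b7_relu = bottleneck_unit(res4b6_relu, '4b7', num_mid=256, num_out=1024)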
# -*- coding: utf-8 -*-
# Copyright (c) 2022, bahaa and Contributors
# See license.txt
from __future__ import unicode_literals

# import frappe
import unittest


class TestAlaqoal(unittest.TestCase):
    pass
Source file: calender/calender/doctype/alaqoal/test_alaqoal.py (205 bytes)
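The stub above contains no assertions. A first test might look like the following sketch; it assumes a hypothetical required "title" field on the Alaqoal doctype (the real fields are not shown in the source) and uses standard Frappe document calls.

import frappe
import unittest


class TestAlaqoalCreation(unittest.TestCase):
    def test_create_and_fetch(self):
        # "title" is an assumed field name, not taken from the original doctype
        doc = frappe.get_doc({"doctype": "Alaqoal", "title": "Test entry"})
        doc.insert(ignore_permissions=True)
        fetched = frappe.get_doc("Alaqoal", doc.name)
        self.assertEqual(fetched.title, "Test entry")
        doc.delete()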
# <a href="https://colab.research.google.com/github/couyang24/general_learning-tiffany/blob/master/Titanic/analysis/colab_titanic_main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Need to mount Drive on or upload kaggle.json from google.colab import drive drive.mount("/content/drive") # !mkdir ~/.kaggle/ # !cp drive/My\ Drive/input/kaggle.json ~/.kaggle/ # !kaggle competitions download -c titanic # Load Package # import numpy as np import pandas as pd import seaborn as sns import featuretools import featuretools as ft from sklearn.base import BaseEstimator, TransformerMixin from sklearn.impute import SimpleImputer, MissingIndicator from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.preprocessing import ( OneHotEncoder, StandardScaler, LabelEncoder, OrdinalEncoder, ) from sklearn.compose import ColumnTransformer from sklearn.model_selection import cross_val_score, RandomizedSearchCV # Load data train_df = pd.read_csv("train.csv") test_df = pd.read_csv("test.csv") # Save data target = train_df[["Survived"]] submission = test_df[["PassengerId"]] # Join and Clean combine = pd.concat([train_df, test_df]) # EDA combine.info() combine.columns mapping = { "Mlle": "Miss", "Major": "Mr", "Col": "Mr", "Sir": "Mr", "Don": "Mr", "Mme": "Miss", "Jonkheer": "Mr", "Lady": "Mrs", "Capt": "Mr", "Countess": "Mrs", "Ms": "Miss", "Dona": "Mrs", } combine["Title"] = combine.Name.apply( lambda x: x.split(".")[0].split(",")[1].strip() ).replace(mapping) combine.drop(["Cabin", "Ticket", "Name"], axis=1, inplace=True) # + # combine['Sex2'] = combine['Sex'].apply(lambda x: 0 if x=='female' else 1) # + # class ModifiedLabelEncoder(LabelEncoder): # def fit_transform(self, y, *args, **kwargs): # return super().fit_transform(y) # def transform(self, y, *args, **kwargs): # return super().transform(y) # + categorical_transformer = Pipeline( steps=[ ("imputer", SimpleImputer(strategy="most_frequent")), ("encode", OrdinalEncoder()), ] ) numeric_transformer = Pipeline([("imputer", SimpleImputer(strategy="median")),]) # - combine[["Sex", "Embarked", "Title"]] = categorical_transformer.fit_transform( combine[["Sex", "Embarked", "Title"]] ) combine[["Age", "Fare"]] = numeric_transformer.fit_transform(combine[["Age", "Fare"]]) # + es = ft.EntitySet(id="titanic_data") es = es.entity_from_dataframe( entity_id="combine", dataframe=combine.drop(["Survived"], axis=1), variable_types={ "Embarked": ft.variable_types.Categorical, "Sex": ft.variable_types.Boolean, "Title": ft.variable_types.Categorical, }, index="PassengerId", ) es # - es = es.normalize_entity( base_entity_id="combine", new_entity_id="Embarked", index="Embarked" ) es = es.normalize_entity(base_entity_id="combine", new_entity_id="Sex", index="Sex") es = es.normalize_entity(base_entity_id="combine", new_entity_id="Title", index="Title") es = es.normalize_entity( base_entity_id="combine", new_entity_id="Pclass", index="Pclass" ) es = es.normalize_entity(base_entity_id="combine", new_entity_id="Parch", index="Parch") es = es.normalize_entity(base_entity_id="combine", new_entity_id="SibSp", index="SibSp") es primitives = ft.list_primitives() pd.options.display.max_colwidth = 100 primitives[primitives["type"] == "aggregation"].head( primitives[primitives["type"] == "aggregation"].shape[0] ) primitives[primitives["type"] == "transform"].head( primitives[primitives["type"] == "transform"].shape[0] ) features, feature_names = ft.dfs( entityset=es, target_entity="combine", # 
trans_primitives=['subtract_numeric', 'add_numeric', 'divide_numeric', 'multiply_numeric'], max_depth=2, ) feature_names len(feature_names) features.isnull().sum() class RemoveLowInfo(BaseEstimator, TransformerMixin): def __init__(self, threshold): self.threshold = threshold def fit(self, X, y=None): return self def transform(self, X): df = X.copy() keep = [ column for column in df.columns if df[column].value_counts(normalize=True).reset_index(drop=True)[0] < self.threshold ] return df[keep] from sklearn.preprocessing import OneHotEncoder, StandardScaler, FunctionTransformer impute_median = FunctionTransformer(lambda x: x.fillna(x.median()), validate=False) normalize = FunctionTransformer(lambda x: (x - x.mean()) / x.std(), validate=False) from sklearn.decomposition import PCA transformer = Pipeline( [ ("imputer", impute_median), ("removelowinfo", RemoveLowInfo(threshold=0.95)), ("scaler", normalize), ] ) clean_features = transformer.fit_transform(features) # !pip install catboost from sklearn.linear_model import LogisticRegression, SGDClassifier from sklearn.ensemble import ( RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, BaggingClassifier, VotingClassifier, ) from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC, LinearSVC from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.decomposition import PCA import xgboost as xgb import lightgbm as lgb import catboost as cgb # + methods = [ ("logistic", LogisticRegression(solver="lbfgs")), # ('sgd', SGDClassifier()), ("tree", DecisionTreeClassifier()), ("bag", BaggingClassifier()), ("xgb", xgb.XGBClassifier(max_depth=3)), ("lgb", lgb.LGBMClassifier(max_depth=3)), # ('cgb', cgb.CatBoostClassifier(max_depth=3,silent=True)), ("ada", AdaBoostClassifier()), ("gbm", GradientBoostingClassifier()), ("rf", RandomForestClassifier(n_estimators=100)), # ('svc', LinearSVC()), # ('rbf', SVC()), ("nb", Pipeline([("pca", PCA()), ("gnb", GaussianNB())])), ("nn", MLPClassifier()), ("knn", KNeighborsClassifier()), ] ensemble = VotingClassifier( methods, voting="soft", # weights=[1,1,1,1,2,2,1,1], # flatten_transform=True, ) clf = Pipeline( [ # ('transformer', transformer), ("ensemble", ensemble), ] ) clf.fit(clean_features.iloc[: train_df.shape[0], :], target) # - submission["Survived"] = pd.DataFrame( clf.predict(clean_features.iloc[train_df.shape[0] :, :]) ) print(submission.dtypes) submission.to_csv("titanic_submission.csv", index=False)
Source file: Titanic/analysis/colab_titanic_main.py (6,712 bytes)
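The notebook above defines a custom RemoveLowInfo transformer that drops any column whose single most frequent value covers at least the given fraction of rows. The toy example below (assumed data, with the RemoveLowInfo class from the notebook in scope) shows the effect of the threshold.

import pandas as pd

toy = pd.DataFrame({
    "mostly_constant": [1, 1, 1, 1, 1, 1, 1, 1, 1, 2],  # 90% identical -> dropped at threshold 0.85
    "informative": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],       # every value distinct -> kept
})
kept = RemoveLowInfo(threshold=0.85).fit_transform(toy)
print(kept.columns.tolist())  # ['informative']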
# Generated by Django 2.1.3 on 2018-11-24 07:01

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images/')),
                ('summary', models.CharField(max_length=200)),
            ],
        ),
    ]
Source file: jobs/migrations/0001_initial.py (553 bytes)
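The migration above was generated from a Job model. The following is a hedged reconstruction of what jobs/models.py likely contained: the two field definitions are read directly off the CreateModel operation (the id field is Django's default), while the file layout itself is an assumption.

# jobs/models.py — sketch inferred from the migration above
from django.db import models


class Job(models.Model):
    image = models.ImageField(upload_to='images/')
    summary = models.CharField(max_length=200)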
import os
import re

import pandas as pd


def sort_human(l):
    """Sort a list of strings by their numerical components."""
    def convert(text):
        return float(text) if text.isdigit() else text

    def alphanum(key):
        return [convert(c) for c in re.split(r'([-+]?[0-9]*\.?[0-9]*)', key)]

    l.sort(key=alphanum)
    return l


def data_merge_by_batch(parent_directory, verbose=True):
    """Merge a set of parameters.csv files into one.

    This is intended for use with batch processes from Legion, with each
    batch being 1000 runs long and numbered with integer values.

    Parameters
    ----------
    parent_directory : :obj:`list` of :obj:`str`
        Parent directory to a set of directories each containing model runs
        and a parameters.csv file.
    verbose : :obj:`boolean`, optional
        Boolean indicator of whether to print extra information.

    Returns
    -------
    str
        Path of the concatenated CSV written to `parent_directory`.
    """
    dirs = [os.path.abspath(os.path.join(parent_directory, d))
            for d in os.listdir(parent_directory)
            if os.path.isdir(os.path.abspath(os.path.join(parent_directory, d))) and
            d != 'archives']
    dirs = sort_human(dirs)
    if verbose:
        print(dirs)
    dfs = []
    for d in dirs:
        try:
            dfs.append(pd.read_csv(os.path.join(d, 'parameters.csv')))
            ii = len(dfs) - 1
            print("Processing parameter file {}".format(ii))
            if ii != 0:
                dfs[ii]['ix'] = dfs[ii].index.values + dfs[ii - 1]['ix'].values[-1] + 1
            else:
                dfs[ii]['ix'] = dfs[ii].index.values
            if os.path.split(d)[1].split('_')[-1].isdigit():
                print(os.path.split(d)[1].split('_')[-1])
                dfs[ii]['Batch'] = int(os.path.split(d)[1].split('_')[-1])
            else:
                print("Batch number not found for {}".format(d))
                continue
        except FileNotFoundError:
            print("No parameters file in {}".format(d))
            continue
    if verbose:
        print("{} dataframes to be joined".format(len(dfs)))
    # for ii in range(len(dfs)):
    #     if ii != 0:
    #         dfs[ii]['ix'] = dfs[ii].index.values + dfs[ii - 1]['ix'].values[-1]
    #     else:
    #         dfs[ii]['ix'] = dfs[ii].index.values
    #     if os.path.split(dirs[ii])[1][:4].isdigit():
    #         print(os.path.split(dirs[ii])[1][:4])
    #         dfs[ii]['Start Time'] = os.path.split(dirs[ii])[1][:4]
    #     else:
    #         continue
    df = pd.concat(dfs)
    df.index = range(len(df))
    output_file = os.path.join(parent_directory, 'all_parameters.csv')
    df.to_csv(output_file, index=False)
    return output_file
Source file: results_processing/ABC/csv_processing.py (2,842 bytes)
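sort_human above orders directory names by their numeric components rather than lexicographically. A quick check with hypothetical run names (assuming the sort_human function above is in scope):

runs = ['run_10', 'run_2', 'run_1']
print(sorted(runs))         # ['run_1', 'run_10', 'run_2']  (plain lexicographic order)
print(sort_human(runs[:]))  # ['run_1', 'run_2', 'run_10']  (numeric suffixes compared as numbers)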
#! /usr/bin/enc python # -*- coding: utf-8 -*- # author: Irving He # email: [email protected] import logging import argparse import os import random import numpy as np from tqdm import tqdm import datetime from datetime import timedelta import torch import torch.distributed as dist from Data_utils import get_loader from Data_utils import CONFIGS from Model import VITransModel from Utils import WarmupCosineSchedule,WarmupLinearSchedule from Utils import set_seed, AverageMeter, simple_accuracy, model_save from tensorboardX import SummaryWriter def count_parameters(model): params = sum(p.numel() for p in model.parameters() if p.requires_grad) return params/1000000 """Config""" class VITConfig: log_dir = "./TB_log/" dataset = "cifar10" # "cifar100" model_type = "ViT-B_16" pretrained_dir = "./Pretrained/imagenet21k_ViT-B_16.npz" # 预训练模型存放位置 save_dir = "./Model/" record_algo = "Pretrained_VIT_Cifar10_ViTB16_" test_cycles = datetime.datetime.now().strftime('%Y%m%d_%H%M') decay_type = "cosine" # "cosine", "linear" 决定了学习率Scheduler类型 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") TB_log = True img_size = 224 train_batch_size = 64 #512 eval_batch_size = 32 #64 eval_every = 100 # Run prediction on validation set every so many steps. learning_rate = 3e-2 # SGD起始学习率 weight_decay = 0 # num_steps = 10000 # Total number of training epochs to perform. warmup_steps = 500 # 开始的Warmup Step数 max_grad_norm = 1.0 local_rank = -1 # local_rank for distributed training on gpus seed = 42 gradient_accumulation_steps = 1 # Number of updates steps to accumulate before performing a backward/update pass. """Model Valid Process""" def valid(args,model,writer,test_loader,global_step): """ :param args: 参数Config :param model: 需验证模型 :param writer: TB写入 :param test_loader: 测试数据集 :param global_step: 全局step :return: """ # Validation eval_losses = AverageMeter() model.eval() all_preds, all_label = [],[] epoch_iterator = tqdm(test_loader, desc="Validating... (loss=X.X)", bar_format="{l_bar}{r_bar}", dynamic_ncols=True) loss_fct = torch.nn.CrossEntropyLoss() global_eval_step = 0 for step, batch in enumerate(epoch_iterator): global_eval_step += 1 batch = tuple(t.to(args.device) for t in batch) x,y = batch with torch.no_grad(): logits = model(x)[0] eval_loss = loss_fct(logits,y) eval_losses.update(eval_loss.item()) #滑动平均 preds = torch.argmax(logits,dim=-1) if len(all_preds) == 0: all_preds.append(preds.detach().cpu().numpy()) all_label.append(y.detach().cpu().numpy()) else: # append在后面 all_preds[0] = np.append(all_preds[0], preds.detach().cpu().numpy(), axis=0) all_label[0] = np.append(all_label[0], y.detach().cpu().numpy(), axis=0) epoch_iterator.set_description("Validating... 
(loss=%2.5f)" % eval_losses.val) writer.add_scalar("Train/loss", scalar_value=eval_losses.val, global_step=global_eval_step) all_preds, all_label = all_preds[0], all_label[0] # all_preds: numpy.array; all_label: numpy.array; accuracy = simple_accuracy(all_preds,all_label) writer.add_scalar("test/accuracy",scalar_value=accuracy,global_step=global_step) return accuracy """Model Training Process""" def train(args=VITConfig()): """ :param args: - log_dir """ # 模型准备 pretrained_model_config = CONFIGS[args.model_type] num_classes = 10 if args.dataset == "cifar10" else 100 model = VITransModel(pretrained_model_config, args.img_size, zero_head=True, num_classes=num_classes) model.load_from(np.load(args.pretrained_dir)) model.to(device=args.device) num_params = count_parameters(model) if args.TB_log: os.makedirs(args.log_dir, exist_ok=True) writer = SummaryWriter(logdir=args.log_dir + args.record_algo + args.test_cycles) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps # 1. DATA准备 train_loader, test_loader = get_loader(args) # 2. 准备优化器以及Scheduler optimizer = torch.optim.SGD(model.parameters(), lr = args.learning_rate, # init lr momentum=0.9, weight_decay=args.weight_decay) t_total = args.num_steps # Total time steps if args.decay_type == "cosine": scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) else: scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) # 3. Training model.zero_grad() set_seed(args.seed) losses = AverageMeter() global_step = 0 best_acc = 0 while True: model.train() # 一个数据迭代器 epoch_iterator = tqdm(train_loader, desc="Training (X / X Steps) (loss=X.X)", bar_format="{l_bar}{r_bar}", dynamic_ncols=True) for step, batch in enumerate(epoch_iterator): batch = tuple(t.to(args.device) for t in batch) x,y = batch # XData, YLabel loss = model.forward(x,y) loss.backward() if (step+1)%args.gradient_accumulation_steps == 0: losses.update(loss.item()*args.gradient_accumulation_steps) torch.nn.utils.clip_grad_norm(model.parameters(),1.0) scheduler.step() optimizer.step() optimizer.zero_grad() global_step += 1 # Print Training Info epoch_iterator.set_description( "Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val) ) writer.add_scalar("Train/loss",scalar_value=losses.val, global_step=global_step) writer.add_scalar("Train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step) # Valid ... if global_step % args.eval_every == 0: accuracy = valid(args, model, writer, test_loader, global_step) if best_acc < accuracy: best_acc = accuracy model_save(args.record_algo+args.test_cycles,model) model.train() if global_step % t_total == 0: break losses.reset() if global_step % t_total == 0: break writer.close() print("==="*30) print("Best Accuracy: \t%f" % best_acc) print("End Training!") print("==="*30) if __name__ == "__main__": train() # all_preds = [] # all_labels = [] # # all_pred = torch.tensor([1,0,1,1,0,1]) # all_label = torch.tensor([1,1,1,1,1,1]) # # all_preds.append(all_pred) # all_labels.append(all_label) # print(all_preds) # all_preds[0] = np.append(all_preds[0],all_label,axis=0) # all_labels[0] = np.append(all_labels[0],all_pred,axis=0) # print(type(all_preds[0])) # print(type(all_labels[0])) # acc = simple_accuracy(all_preds[0],all_labels[0]) # print(acc)
VIT/Train.py
7,578
1,162
en
0.389346
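The Train.py record above pulls WarmupCosineSchedule / WarmupLinearSchedule from its local Utils module, which is not included in this dump. As a point of reference, here is a minimal sketch of the same warmup-then-cosine-decay schedule built on torch.optim.lr_scheduler.LambdaLR; the helper name make_warmup_cosine_lambda and the toy model are illustrative, not the repository's actual implementation.

import math
import torch

def make_warmup_cosine_lambda(warmup_steps: int, t_total: int):
    """Return a LambdaLR multiplier: linear warmup, then cosine decay to 0."""
    def lr_lambda(step: int) -> float:
        if step < warmup_steps:
            return float(step) / float(max(1, warmup_steps))
        progress = float(step - warmup_steps) / float(max(1, t_total - warmup_steps))
        return 0.5 * (1.0 + math.cos(math.pi * progress))
    return lr_lambda

# Usage sketch: wraps any optimizer, e.g. the SGD optimizer used above.
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=3e-2, momentum=0.9)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=make_warmup_cosine_lambda(warmup_steps=500, t_total=10000))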
""" Django settings for hiren project. Generated by 'django-admin startproject' using Django 1.8.4. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import json from celery.schedules import crontab BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # load json file baby :D try: with open('config.json') as f: JSON_DATA = json.load(f) except FileNotFoundError: with open('config.sample.json') as f: JSON_DATA = json.load(f) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key']) # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get('DEBUG', False) ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'debug_toolbar', 'github' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'hiren.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'hiren.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases if 'TRAVIS' in os.environ: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'travisci', 'USER': 'postgres', 'PASSWORD': '', 'HOST': 'localhost', 'PORT': '', } } else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'hiren_github_management', 'USER': 'hiren', 'PASSWORD': 'hiren', 'HOST': 'localhost', 'PORT': '', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Dhaka' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder" ) STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), ) LOGIN_URL = '/' # CELERY STUFF BROKER_URL = 'redis://localhost:6379' CELERY_RESULT_BACKEND = 'redis://localhost:6379' CELERY_ACCEPT_CONTENT = ['application/json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERYBEAT_SCHEDULE = { 'add-every-30-seconds': { 'task': 'github.tasks.get_data', 'schedule': crontab(minute=0, hour='22'), # execute every day at 10 pm }, }
hiren/settings.py
4,055
953
en
0.662831
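One detail in the settings above deserves a note: DEBUG = os.environ.get('DEBUG', False) only yields False when the variable is unset; if DEBUG is set to any non-empty string (even "False" or "0"), the result is a truthy string and debug mode is effectively enabled. A minimal sketch of a stricter parse, with the env_bool helper name being illustrative rather than part of the project:

import os

def env_bool(name: str, default: bool = False) -> bool:
    """Interpret common truthy strings; anything else falls back to False."""
    value = os.environ.get(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "on")

DEBUG = env_bool('DEBUG', False)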
__author__ = "Stefan Weißenberger and Johannes Gasteiger" __license__ = "MIT" import os import numpy as np from scipy.linalg import expm import torch from torch_geometric.data import Data, InMemoryDataset from torch_geometric.datasets import Planetoid, Amazon, Coauthor from seeds import development_seed DATA_PATH = 'data' def get_dataset(name: str, use_lcc: bool = True) -> InMemoryDataset: path = os.path.join(DATA_PATH, name) if name in ['Cora', 'Citeseer', 'Pubmed']: dataset = Planetoid(path, name) elif name in ['Computers', 'Photo']: dataset = Amazon(path, name) elif name == 'CoauthorCS': dataset = Coauthor(path, 'CS') else: raise Exception('Unknown dataset.') if use_lcc: lcc = get_largest_connected_component(dataset) x_new = dataset.data.x[lcc] y_new = dataset.data.y[lcc] row, col = dataset.data.edge_index.numpy() edges = [[i, j] for i, j in zip(row, col) if i in lcc and j in lcc] edges = remap_edges(edges, get_node_mapper(lcc)) data = Data( x=x_new, edge_index=torch.LongTensor(edges), y=y_new, train_mask=torch.zeros(y_new.size()[0], dtype=torch.bool), test_mask=torch.zeros(y_new.size()[0], dtype=torch.bool), val_mask=torch.zeros(y_new.size()[0], dtype=torch.bool) ) dataset.data = data return dataset def get_component(dataset: InMemoryDataset, start: int = 0) -> set: visited_nodes = set() queued_nodes = set([start]) row, col = dataset.data.edge_index.numpy() while queued_nodes: current_node = queued_nodes.pop() visited_nodes.update([current_node]) neighbors = col[np.where(row == current_node)[0]] neighbors = [n for n in neighbors if n not in visited_nodes and n not in queued_nodes] queued_nodes.update(neighbors) return visited_nodes def get_largest_connected_component(dataset: InMemoryDataset) -> np.ndarray: remaining_nodes = set(range(dataset.data.x.shape[0])) comps = [] while remaining_nodes: start = min(remaining_nodes) comp = get_component(dataset, start) comps.append(comp) remaining_nodes = remaining_nodes.difference(comp) return np.array(list(comps[np.argmax(list(map(len, comps)))])) def get_node_mapper(lcc: np.ndarray) -> dict: mapper = {} counter = 0 for node in lcc: mapper[node] = counter counter += 1 return mapper def remap_edges(edges: list, mapper: dict) -> list: row = [e[0] for e in edges] col = [e[1] for e in edges] row = list(map(lambda x: mapper[x], row)) col = list(map(lambda x: mapper[x], col)) return [row, col] def get_adj_matrix(dataset: InMemoryDataset) -> np.ndarray: num_nodes = dataset.data.x.shape[0] adj_matrix = np.zeros(shape=(num_nodes, num_nodes)) for i, j in zip(dataset.data.edge_index[0], dataset.data.edge_index[1]): adj_matrix[i, j] = 1. return adj_matrix def get_ppr_matrix( adj_matrix: np.ndarray, alpha: float = 0.1) -> np.ndarray: num_nodes = adj_matrix.shape[0] A_tilde = adj_matrix + np.eye(num_nodes) D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1))) H = D_tilde @ A_tilde @ D_tilde return alpha * np.linalg.inv(np.eye(num_nodes) - (1 - alpha) * H) def get_heat_matrix( adj_matrix: np.ndarray, t: float = 5.0) -> np.ndarray: num_nodes = adj_matrix.shape[0] A_tilde = adj_matrix + np.eye(num_nodes) D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1))) H = D_tilde @ A_tilde @ D_tilde return expm(-t * (np.eye(num_nodes) - H)) def get_top_k_matrix(A: np.ndarray, k: int = 128) -> np.ndarray: num_nodes = A.shape[0] row_idx = np.arange(num_nodes) A[A.argsort(axis=0)[:num_nodes - k], row_idx] = 0. 
norm = A.sum(axis=0) norm[norm <= 0] = 1 # avoid dividing by zero return A/norm def get_clipped_matrix(A: np.ndarray, eps: float = 0.01) -> np.ndarray: num_nodes = A.shape[0] A[A < eps] = 0. norm = A.sum(axis=0) norm[norm <= 0] = 1 # avoid dividing by zero return A/norm def set_train_val_test_split( seed: int, data: Data, num_development: int = 1500, num_per_class: int = 20) -> Data: rnd_state = np.random.RandomState(development_seed) num_nodes = data.y.shape[0] development_idx = rnd_state.choice(num_nodes, num_development, replace=False) test_idx = [i for i in np.arange(num_nodes) if i not in development_idx] train_idx = [] rnd_state = np.random.RandomState(seed) for c in range(data.y.max() + 1): class_idx = development_idx[np.where(data.y[development_idx].cpu() == c)[0]] train_idx.extend(rnd_state.choice(class_idx, num_per_class, replace=False)) val_idx = [i for i in development_idx if i not in train_idx] def get_mask(idx): mask = torch.zeros(num_nodes, dtype=torch.bool) mask[idx] = 1 return mask data.train_mask = get_mask(train_idx) data.val_mask = get_mask(val_idx) data.test_mask = get_mask(test_idx) return data class PPRDataset(InMemoryDataset): """ Dataset preprocessed with GDC using PPR diffusion. Note that this implementations is not scalable since we directly invert the adjacency matrix. """ def __init__(self, name: str = 'Cora', use_lcc: bool = True, alpha: float = 0.1, k: int = 16, eps: float = None): self.name = name self.use_lcc = use_lcc self.alpha = alpha self.k = k self.eps = eps super(PPRDataset, self).__init__(DATA_PATH) self.data, self.slices = torch.load(self.processed_paths[0]) @property def raw_file_names(self) -> list: return [] @property def processed_file_names(self) -> list: return [str(self) + '.pt'] def download(self): pass def process(self): base = get_dataset(name=self.name, use_lcc=self.use_lcc) # generate adjacency matrix from sparse representation adj_matrix = get_adj_matrix(base) # obtain exact PPR matrix ppr_matrix = get_ppr_matrix(adj_matrix, alpha=self.alpha) if self.k: print(f'Selecting top {self.k} edges per node.') ppr_matrix = get_top_k_matrix(ppr_matrix, k=self.k) elif self.eps: print(f'Selecting edges with weight greater than {self.eps}.') ppr_matrix = get_clipped_matrix(ppr_matrix, eps=self.eps) else: raise ValueError # create PyG Data object edges_i = [] edges_j = [] edge_attr = [] for i, row in enumerate(ppr_matrix): for j in np.where(row > 0)[0]: edges_i.append(i) edges_j.append(j) edge_attr.append(ppr_matrix[i, j]) edge_index = [edges_i, edges_j] data = Data( x=base.data.x, edge_index=torch.LongTensor(edge_index), edge_attr=torch.FloatTensor(edge_attr), y=base.data.y, train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool), test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool), val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool) ) data, slices = self.collate([data]) torch.save((data, slices), self.processed_paths[0]) def __str__(self) -> str: return f'{self.name}_ppr_alpha={self.alpha}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}' class HeatDataset(InMemoryDataset): """ Dataset preprocessed with GDC using heat kernel diffusion. Note that this implementations is not scalable since we directly calculate the matrix exponential of the adjacency matrix. 
""" def __init__(self, name: str = 'Cora', use_lcc: bool = True, t: float = 5.0, k: int = 16, eps: float = None): self.name = name self.use_lcc = use_lcc self.t = t self.k = k self.eps = eps super(HeatDataset, self).__init__(DATA_PATH) self.data, self.slices = torch.load(self.processed_paths[0]) @property def raw_file_names(self) -> list: return [] @property def processed_file_names(self) -> list: return [str(self) + '.pt'] def download(self): pass def process(self): base = get_dataset(name=self.name, use_lcc=self.use_lcc) # generate adjacency matrix from sparse representation adj_matrix = get_adj_matrix(base) # get heat matrix as described in Berberidis et al., 2019 heat_matrix = get_heat_matrix(adj_matrix, t=self.t) if self.k: print(f'Selecting top {self.k} edges per node.') heat_matrix = get_top_k_matrix(heat_matrix, k=self.k) elif self.eps: print(f'Selecting edges with weight greater than {self.eps}.') heat_matrix = get_clipped_matrix(heat_matrix, eps=self.eps) else: raise ValueError # create PyG Data object edges_i = [] edges_j = [] edge_attr = [] for i, row in enumerate(heat_matrix): for j in np.where(row > 0)[0]: edges_i.append(i) edges_j.append(j) edge_attr.append(heat_matrix[i, j]) edge_index = [edges_i, edges_j] data = Data( x=base.data.x, edge_index=torch.LongTensor(edge_index), edge_attr=torch.FloatTensor(edge_attr), y=base.data.y, train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool), test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool), val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool) ) data, slices = self.collate([data]) torch.save((data, slices), self.processed_paths[0]) def __str__(self) -> str: return f'{self.name}_heat_t={self.t}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
data.py
10,715
606
en
0.806371
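The exact PPR diffusion in data.py above is Pi = alpha * (I - (1 - alpha) * D_tilde^(-1/2) A_tilde D_tilde^(-1/2))^(-1) with A_tilde = A + I, i.e. a dense matrix inverse, which is why the class docstring warns that it does not scale. A small self-contained check on a toy graph that mirrors get_ppr_matrix (no torch_geometric required; the function and variable names here are illustrative):

import numpy as np

def ppr_matrix(adj: np.ndarray, alpha: float = 0.1) -> np.ndarray:
    n = adj.shape[0]
    a_tilde = adj + np.eye(n)                       # add self-loops
    d_inv_sqrt = np.diag(1.0 / np.sqrt(a_tilde.sum(axis=1)))
    h = d_inv_sqrt @ a_tilde @ d_inv_sqrt           # symmetrically normalized transition matrix
    return alpha * np.linalg.inv(np.eye(n) - (1.0 - alpha) * h)

# Toy 3-node path graph: 0 - 1 - 2
adj = np.array([[0., 1., 0.],
                [1., 0., 1.],
                [0., 1., 0.]])
pi = ppr_matrix(adj, alpha=0.1)
print(pi.sum(axis=0))   # column mass before the top-k / eps sparsification step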
"""This contains all of the model filters used by the Shepherd application.""" # Django & Other 3rd Party Libraries import django_filters from crispy_forms.bootstrap import ( Accordion, AccordionGroup, InlineCheckboxes, PrependedText, ) from crispy_forms.helper import FormHelper from crispy_forms.layout import HTML, ButtonHolder, Column, Layout, Row, Submit from django import forms from django.forms.widgets import TextInput # Ghostwriter Libraries from .models import Domain, DomainStatus, HealthStatus, ServerStatus class DomainFilter(django_filters.FilterSet): """ Filter :model:`shepherd.Domain` model for searching. **Fields** ``name`` Case insensitive search of the name field contents ``all_cat`` Case insensitive search of the all_cat field ``health_status`` Checkbox choice filter using :model:`shepherd.HealthStatus` ``domain_status`` Checkbox choice filter using :model:`shepherd.DomainStatus` ``expiration_status`` Boolean field to filter expired domains """ name = django_filters.CharFilter( lookup_expr="icontains", label="Domain Name Contains", widget=TextInput(attrs={"placeholder": "specterops.io", "autocomplete": "off"}), ) all_cat = django_filters.CharFilter( lookup_expr="icontains", label="Categories Contain", widget=TextInput(attrs={"placeholder": "Category", "autocomplete": "off"}), ) health_status = django_filters.ModelMultipleChoiceFilter( queryset=HealthStatus.objects.all(), widget=forms.CheckboxSelectMultiple, label="", ) domain_status = django_filters.ModelMultipleChoiceFilter( queryset=DomainStatus.objects.all(), widget=forms.CheckboxSelectMultiple, label="", ) STATUS_CHOICES = ( (0, "Active"), (1, "Expired"), ) expiration_status = django_filters.ChoiceFilter( field_name="expired", choices=STATUS_CHOICES, label="Expiration Status" ) class Meta: model = Domain fields = ["name", "all_cat", "health_status", "domain_status"] def __init__(self, *args, **kwargs): super(DomainFilter, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.form_method = "get" self.helper.form_class = "newitem" self.helper.form_show_labels = False # Layout the form for Bootstrap self.helper.layout = Layout( Row( Column( PrependedText("name", '<i class="fas fa-filter"></i>'), css_class="col-md-4 offset-md-2", ), Column( PrependedText("all_cat", '<i class="fas fa-filter"></i>'), css_class=" col-md-4", ), css_class="form-row", ), Accordion( AccordionGroup("Domain Statuses", InlineCheckboxes("domain_status")), AccordionGroup("Health Statuses", InlineCheckboxes("health_status")), ), ButtonHolder( Submit("submit", "Filter", css_class="btn btn-primary col-md-2"), HTML( """ <a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:domains' %}">Reset</a> """ ), ), ) class ServerFilter(django_filters.FilterSet): """ Filter :model:`shepherd.StaticServer` model for searching. 
**Fields** ``io_address`` Case insensitive search of the ip_address field contents ``name`` Case insensitive search of the name field contents ``server_status`` Checkbox choice filter using :model:`shepherd.ServerStatus` """ ip_address = django_filters.CharFilter( lookup_expr="icontains", label="IP Address Contains", widget=TextInput(attrs={"placeholder": "104.31.5.75", "autocomplete": "off"}), ) name = django_filters.CharFilter( lookup_expr="icontains", label="Server Name Contains", widget=TextInput(attrs={"placeholder": "Hostname", "autocomplete": "off"}), ) server_status = django_filters.ModelMultipleChoiceFilter( queryset=ServerStatus.objects.all(), widget=forms.CheckboxSelectMultiple, label="Server Status", ) class Meta: model = Domain fields = ["ip_address", "name", "server_status"] def __init__(self, *args, **kwargs): super(ServerFilter, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.form_method = "get" self.helper.form_class = "newitem" self.helper.form_show_labels = False # Layout the form for Bootstrap self.helper.layout = Layout( Row( Column( PrependedText("ip_address", '<i class="fas fa-filter"></i>'), css_class="col-md-4 offset-md-2", ), Column( PrependedText("name", '<i class="fas fa-filter"></i>'), css_class=" col-md-4", ), css_class="form-row", ), Accordion( AccordionGroup("Server Status", InlineCheckboxes("server_status")), ), ButtonHolder( Submit("submit", "Filter", css_class="btn btn-primary col-md-2"), HTML( """ <a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:servers' %}">Reset</a> """ ), ), )
ghostwriter/shepherd/filters.py
5,772
906
en
0.599886
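The FilterSets above follow the usual django-filter pattern: bind the request's GET parameters plus a base queryset, render filter.form (crispy-forms picks up the FormHelper), and iterate filter.qs for the results. A minimal view sketch under that assumption; the view name and template path are illustrative and not part of the Ghostwriter code shown here:

from django.shortcuts import render

from .filters import DomainFilter
from .models import Domain

def domain_list(request):
    # Bind the filter to the query string; an unfiltered GET returns the full queryset.
    domain_filter = DomainFilter(request.GET, queryset=Domain.objects.all())
    return render(request, "shepherd/domain_list.html", {"filter": domain_filter})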
# Copyright 2018 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Unit tests for the :mod:`pennylane.interface.tf` QNode interface. """ import pytest import numpy as np try: import tensorflow as tf if tf.__version__[0] == "1": import tensorflow.contrib.eager as tfe tf.enable_eager_execution() Variable = tfe.Variable else: from tensorflow import Variable except ImportError as e: pass import pennylane as qml from pennylane.qnode import _flatten, unflatten, QNode, QuantumFunctionError from pennylane.plugins.default_qubit import CNOT, Rotx, Roty, Rotz, I, Y, Z from pennylane._device import DeviceError def expZ(state): return np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2 @pytest.fixture(scope='module') def tf_support(): """Boolean fixture for TensorFlow support""" try: import tensorflow as tf tf_support = True except ImportError as e: tf_support = False return tf_support @pytest.fixture() def skip_if_no_tf_support(tf_support): if not tf_support: pytest.skip("Skipped, no tf support") @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFQNodeExceptions(): """TFQNode basic tests.""" def test_qnode_fails_on_wrong_return_type(self, qubit_device_2_wires): """The qfunc must return only Expectations""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) return qml.expval(qml.PauliZ(0)), 0.3 with pytest.raises(QuantumFunctionError, match='must return either'): qf(Variable(0.5)) def test_qnode_fails_on_expval_not_returned(self, qubit_device_2_wires): """All expectation values in the qfunc must be returned""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ex = qml.expval(qml.PauliZ(1)) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='All measured observables'): qf(Variable(0.5)) def test_qnode_fails_on_wrong_expval_order(self, qubit_device_2_wires): """Expvals must be returned in the order they were created in""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ex = qml.expval(qml.PauliZ(1)) return qml.expval(qml.PauliZ(0)), ex with pytest.raises(QuantumFunctionError, match='All measured observables'): qf(Variable(0.5)) def test_qnode_fails_on_gates_after_measurements(self, qubit_device_2_wires): """Gates have to precede measurements""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) ev = qml.expval(qml.PauliZ(1)) qml.RY(0.5, wires=[0]) return ev with pytest.raises(QuantumFunctionError, match='gates must precede'): qf(Variable(0.5)) def test_qnode_fails_on_multiple_measurements_of_same_wire(self, qubit_device_2_wires): """A wire can only be measured once""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliX(0)) with pytest.raises(QuantumFunctionError, match='can only be measured once'): qf(Variable(0.5)) def test_qnode_fails_on_qfunc_with_too_many_wires(self, qubit_device_2_wires): """The 
device must have sufficient wires for the qfunc""" @qml.qnode(qubit_device_2_wires, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.CNOT(wires=[0, 2]) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='applied to invalid wire'): qf(Variable(0.5)) def test_qnode_fails_on_combination_of_cv_and_qbit_ops(self, qubit_device_1_wire): """CV and discrete operations must not be mixed""" @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): qml.RX(x, wires=[0]) qml.Displacement(0.5, 0, wires=[0]) return qml.expval(qml.PauliZ(0)) with pytest.raises(QuantumFunctionError, match='Continuous and discrete'): qf(Variable(0.5)) def test_qnode_fails_for_cv_ops_on_qubit_device(self, qubit_device_1_wire): """A qubit device cannot execute CV operations""" @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): qml.Displacement(0.5, 0, wires=[0]) return qml.expval(qml.X(0)) with pytest.raises(DeviceError, match='Gate [a-zA-Z]+ not supported on device'): qf(Variable(0.5)) def test_qnode_fails_for_cv_observables_on_qubit_device(self, qubit_device_1_wire): """A qubit device cannot measure CV observables""" @qml.qnode(qubit_device_1_wire, interface='tf') def qf(x): return qml.expval(qml.X(0)) with pytest.raises(DeviceError, match='Observable [a-zA-Z]+ not supported on device'): qf(Variable(0.5)) @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFQNodeParameterHandling: """Test that the TFQNode properly handles the parameters of qfuncs""" def test_qnode_fanout(self, qubit_device_1_wire, tol): """Tests that qnodes can compute the correct function when the same parameter is used in multiple gates.""" @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(reused_param, other_param): qml.RX(reused_param, wires=[0]) qml.RZ(other_param, wires=[0]) qml.RX(reused_param, wires=[0]) return qml.expval(qml.PauliZ(0)) thetas = tf.linspace(-2*np.pi, 2*np.pi, 7) for reused_param in thetas: for theta in thetas: other_param = theta ** 2 / 11 y_eval = circuit(reused_param, other_param) Rx = Rotx(reused_param.numpy()) Rz = Rotz(other_param.numpy()) zero_state = np.array([1.,0.]) final_state = (Rx @ Rz @ Rx @ zero_state) y_true = expZ(final_state) assert np.allclose(y_eval, y_true, atol=tol, rtol=0) def test_qnode_array_parameters_scalar_return(self, qubit_device_1_wire, tol): """Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow. Test case for a circuit that returns a scalar.""" # The objective of this test is not to check if the results are correctly calculated, # but to check that the interoperability of the different return types works. 
@qml.qnode(qubit_device_1_wire, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) return qml.expval(qml.PauliX(0)) # returns a scalar grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_qnode_array_parameters_1_vector_return(self, qubit_device_1_wire, tol): """Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow Test case for a circuit that returns a 1-vector.""" # The objective of this test is not to check if the results are correctly calculated, # but to check that the interoperability of the different return types works. @qml.qnode(qubit_device_1_wire, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) return qml.expval(qml.PauliX(0)), # note the comma, returns a 1-vector grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) c = c[0] # get a scalar return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_qnode_array_parameters_2_vector_return(self, qubit_device_2_wires, tol): """Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow Test case for a circuit that returns a 2-vector.""" # The objective of this test is not to check if the results are correctly calculated, # but to check that the interoperability of the different return types works. 
@qml.qnode(qubit_device_2_wires, interface='tf') def circuit(dummy1, array, dummy2): qml.RY(0.5 * array[0,1], wires=0) qml.RY(-0.5 * array[1,1], wires=0) qml.RY(array[1,0], wires=1) return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) # returns a 2-vector grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4)) cost_target = 1.03257 args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13)) def cost(x, array, y): c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32) c = c[0] # get a scalar return c +0.5*array[0,0] +x -0.4*y with tf.GradientTape() as tape: cost_res = cost(*args) grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])]) assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0) assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0) def test_array_parameters_evaluate(self, qubit_device_2_wires, tol): """Test that array parameters gives same result as positional arguments.""" a, b, c = tf.constant(0.5), tf.constant(0.54), tf.constant(0.3) def ansatz(x, y, z): qml.QubitStateVector(np.array([1, 0, 1, 1])/np.sqrt(3), wires=[0, 1]) qml.Rot(x, y, z, wires=0) qml.CNOT(wires=[0, 1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit1(x, y, z): return ansatz(x, y, z) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit2(x, array): return ansatz(x, array[0], array[1]) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit3(array): return ansatz(*array) positional_res = circuit1(a, b, c) array_res1 = circuit2(a, Variable([b, c])) array_res2 = circuit3(Variable([a, b, c])) assert np.allclose(positional_res.numpy(), array_res1.numpy(), atol=tol, rtol=0) assert np.allclose(positional_res.numpy(), array_res2.numpy(), atol=tol, rtol=0) def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol): """Tests that qnodes return multiple expectation values.""" a, b, c = Variable(0.5), Variable(0.54), Variable(0.3) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(x, y, z): qml.RX(x, wires=[0]) qml.RZ(y, wires=[0]) qml.CNOT(wires=[0, 1]) qml.RY(y, wires=[0]) qml.RX(z, wires=[0]) return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1)) res = circuit(a, b, c) out_state = np.kron(Rotx(c.numpy()), I) @ np.kron(Roty(b.numpy()), I) @ CNOT \ @ np.kron(Rotz(b.numpy()), I) @ np.kron(Rotx(a.numpy()), I) @ np.array([1, 0, 0, 0]) ex0 = np.vdot(out_state, np.kron(Y, I) @ out_state) ex1 = np.vdot(out_state, np.kron(I, Z) @ out_state) ex = np.array([ex0, ex1]) assert np.allclose(ex, res.numpy(), atol=tol, rtol=0) def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol): """Tests that qnodes use multiple keyword arguments.""" @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(w, x=None, y=None): qml.RX(x, wires=[0]) qml.RX(y, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) c = circuit(tf.constant(1.), x=np.pi, y=np.pi) assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0) def test_multidimensional_keywordargs_used(self, qubit_device_2_wires, tol): """Tests that qnodes use multi-dimensional keyword arguments.""" def circuit(w, x=None): qml.RX(x[0], wires=[0]) qml.RX(x[1], wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c = circuit(tf.constant(1.), x=[np.pi, np.pi]) assert np.allclose(c.numpy(), [-1., 
-1.], atol=tol, rtol=0) def test_keywordargs_for_wires(self, qubit_device_2_wires, tol): """Tests that wires can be passed as keyword arguments.""" default_q = 0 def circuit(x, q=default_q): qml.RY(x, wires=0) return qml.expval(qml.PauliZ(q)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c = circuit(tf.constant(np.pi), q=1) assert np.allclose(c, 1., atol=tol, rtol=0) c = circuit(tf.constant(np.pi)) assert np.allclose(c.numpy(), -1., atol=tol, rtol=0) def test_keywordargs_used(self, qubit_device_1_wire, tol): """Tests that qnodes use keyword arguments.""" def circuit(w, x=None): qml.RX(x, wires=[0]) return qml.expval(qml.PauliZ(0)) circuit = qml.QNode(circuit, qubit_device_1_wire).to_tf() c = circuit(tf.constant(1.), x=np.pi) assert np.allclose(c.numpy(), -1., atol=tol, rtol=0) def test_mixture_numpy_tensors(self, qubit_device_2_wires, tol): """Tests that qnodes work with python types and tensors.""" @qml.qnode(qubit_device_2_wires, interface='tf') def circuit(w, x, y): qml.RX(x, wires=[0]) qml.RX(y, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) c = circuit(tf.constant(1.), np.pi, np.pi).numpy() assert np.allclose(c, [-1., -1.], atol=tol, rtol=0) def test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires): """Tests that qnodes update keyword arguments in consecutive calls.""" def circuit(w, x=None): qml.RX(w, wires=[0]) qml.RX(x, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() c1 = circuit(tf.constant(0.1), x=tf.constant(0.)) c2 = circuit(tf.constant(0.1), x=np.pi) assert c1[1] != c2[1] def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol): """Tests that qnodes' keyword arguments pass through classical nodes.""" def circuit(w, x=None): qml.RX(w, wires=[0]) qml.RX(x, wires=[1]) return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() def classnode(w, x=None): return circuit(w, x=x) c = classnode(tf.constant(0.), x=np.pi) assert np.allclose(c.numpy(), [1., -1.], atol=tol, rtol=0) def test_keywordarg_gradient(self, qubit_device_2_wires, tol): """Tests that qnodes' keyword arguments work with gradients""" def circuit(x, y, input_state=np.array([0, 0])): qml.BasisState(input_state, wires=[0, 1]) qml.RX(x, wires=[0]) qml.RY(y, wires=[0]) return qml.expval(qml.PauliZ(0)) circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf() x = 0.543 y = 0.45632 expected_grad = np.array([np.sin(x)*np.cos(y), np.sin(y)*np.cos(x)]) x_t = Variable(x) y_t = Variable(y) # test first basis state against analytic result with tf.GradientTape() as tape: c = circuit(x_t, y_t, input_state=np.array([0, 0])) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, -expected_grad, atol=tol, rtol=0) # test third basis state against analytic result with tf.GradientTape() as tape: c = circuit(x_t, y_t, input_state=np.array([1, 0])) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, expected_grad, atol=tol, rtol=0) # test first basis state via the default keyword argument against analytic result with tf.GradientTape() as tape: c = circuit(x_t, y_t) grads = np.array(tape.gradient(c, [x_t, y_t])) assert np.allclose(grads, -expected_grad, atol=tol, rtol=0) @pytest.mark.usefixtures("skip_if_no_tf_support") class TestIntegration: """Integration tests to ensure the TensorFlow QNode agrees with the NumPy QNode""" def test_qnode_evaluation_agrees(self, qubit_device_2_wires, 
tol): """Tests that simple example is consistent.""" @qml.qnode(qubit_device_2_wires, interface='autograd') def circuit(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit_tf(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) phi = [0.5, 0.1] theta = [0.2] phi_t = Variable(phi) theta_t = Variable(theta) autograd_eval = circuit(phi, theta) tf_eval = circuit_tf(phi_t, theta_t) assert np.allclose(autograd_eval, tf_eval.numpy(), atol=tol, rtol=0) def test_qnode_gradient_agrees(self, qubit_device_2_wires, tol): """Tests that simple gradient example is consistent.""" @qml.qnode(qubit_device_2_wires, interface='autograd') def circuit(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(qubit_device_2_wires, interface='tf') def circuit_tf(phi, theta): qml.RX(phi[0], wires=0) qml.RY(phi[1], wires=1) qml.CNOT(wires=[0, 1]) qml.PhaseShift(theta[0], wires=0) return qml.expval(qml.PauliZ(0)) phi = [0.5, 0.1] theta = [0.2] phi_t = Variable(phi) theta_t = Variable(theta) dcircuit = qml.grad(circuit, [0, 1]) autograd_grad = dcircuit(phi, theta) with tf.GradientTape() as g: g.watch([phi_t, theta_t]) y = circuit_tf(phi_t, theta_t) tf_grad = g.gradient(y, [phi_t, theta_t]) assert np.allclose(autograd_grad[0], tf_grad[0], atol=tol, rtol=0) assert np.allclose(autograd_grad[1], tf_grad[1], atol=tol, rtol=0) gradient_test_data = [ (0.5, -0.1), (0.0, np.pi), (-3.6, -3.6), (1.0, 2.5), ] @pytest.mark.usefixtures("skip_if_no_tf_support") class TestTFGradients: """Integration tests involving gradients of QNodes and hybrid computations using the tf interface""" @pytest.fixture def qnodes(self): """Two QNodes to be used for the gradient tests""" dev = qml.device("default.qubit", wires=2) @qml.qnode(dev, interface="tf") def f(x): qml.RX(x, wires=0) return qml.expval(qml.PauliZ(0)) @qml.qnode(dev, interface="tf") def g(y): qml.RY(y, wires=0) return qml.expval(qml.PauliX(0)) return f, g @pytest.mark.parametrize("x, y", gradient_test_data) def test_addition_qnodes_gradient(self, qnodes, x, y): """Test the gradient of addition of two QNode circuits""" f, g = qnodes def add(a, b): return a + b xt = Variable(x) yt = Variable(y) # addition with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = add(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == 1.0 # same tensor added to itself with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) y = add(a, a) grad = tape.gradient(y, [a, a]) assert grad[0].numpy() == 2.0 assert grad[1].numpy() == 2.0 # different qnodes with same input parameter added together with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(xt) y = add(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert grad[1].numpy() == 1.0 @pytest.mark.parametrize("x, y", gradient_test_data) def test_subtraction_qnodes_gradient(self, qnodes, x, y): """Test the gradient of subtraction of two QNode circuits""" f, g = qnodes def subtract(a, b): return a - b xt = Variable(x) yt = Variable(y) # subtraction with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = subtract(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1.0 assert 
grad[1].numpy() == -1.0 @pytest.mark.parametrize("x, y", gradient_test_data) def test_multiplication_qnodes_gradient(self, qnodes, x, y): """Test the gradient of multiplication of two QNode circuits""" f, g = qnodes def mult(a, b): return a * b xt = Variable(x) yt = Variable(y) # multiplication with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = mult(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == b.numpy() assert grad[1].numpy() == a.numpy() @pytest.mark.parametrize("x, y", gradient_test_data) def test_division_qnodes_gradient(self, qnodes, x, y, tol): """Test the gradient of division of two QNode circuits""" f, g = qnodes def div(a, b): return a / b xt = Variable(x) yt = Variable(y) # division with tf.GradientTape() as tape: tape.watch([xt, yt]) a = f(xt) b = g(yt) y = div(a, b) grad = tape.gradient(y, [a, b]) assert grad[0].numpy() == 1 / b.numpy() assert np.allclose(grad[1].numpy(), -a.numpy() / b.numpy() ** 2, atol=tol, rtol=0) @pytest.mark.parametrize("x, y", gradient_test_data) def test_composition_qnodes_gradient(self, qnodes, x, y): """Test the gradient of composition of two QNode circuits""" f, g = qnodes xt = Variable(x) yt = Variable(y) # compose function with xt as input with tf.GradientTape() as tape: tape.watch([xt]) y = f(xt) grad1 = tape.gradient(y, xt) with tf.GradientTape() as tape: tape.watch([xt]) y = f(xt) grad2 = tape.gradient(y, xt) assert tf.equal(grad1, grad2) # compose function with a as input with tf.GradientTape() as tape: tape.watch([xt]) a = f(xt) y = f(a) grad1 = tape.gradient(y, a) with tf.GradientTape() as tape: tape.watch([xt]) a = f(xt) y = f(a) grad2 = tape.gradient(y, a) assert tf.equal(grad1, grad2) # compose function with b as input with tf.GradientTape() as tape: tape.watch([xt]) b = g(xt) y = g(b) grad1 = tape.gradient(y, b) with tf.GradientTape() as tape: tape.watch([xt]) b = g(xt) y = g(b) grad2 = tape.gradient(y, b) assert tf.equal(grad1, grad2)
artifacts/old_dataset_versions/minimal_commits/pennylane/pennylane#385/after/test_tf.py
26,292
3,850
en
0.820635
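The pattern these tests exercise — build a QNode with interface='tf' and differentiate it inside tf.GradientTape — reduces to a few lines. A minimal sketch assuming PennyLane with the default.qubit device and TensorFlow 2 eager mode, as in the tests above:

import pennylane as qml
import tensorflow as tf

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev, interface="tf")
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

x = tf.Variable(0.3)
with tf.GradientTape() as tape:
    y = circuit(x)
grad = tape.gradient(y, x)
print(float(y), float(grad))   # cos(0.3) and -sin(0.3), up to numerical precision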
# Version of the library that will be used to upload to pypi __version__ = "0.28.0.dev0" # Git tag that will be checked to determine whether to trigger upload to pypi __release_tag__ = None
gym-unity/gym_unity/__init__.py
191
134
en
0.900569
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.web.distrib}. """ from os.path import abspath from xml.dom.minidom import parseString try: import pwd except ImportError: pwd = None from zope.interface.verify import verifyObject from twisted.python import filepath, failure from twisted.internet import reactor, defer from twisted.trial import unittest from twisted.spread import pb from twisted.spread.banana import SIZE_LIMIT from twisted.web import distrib, client, resource, static, server from twisted.web.test.test_web import DummyRequest, DummyChannel from twisted.web.test._util import _render from twisted.test import proto_helpers from twisted.web.http_headers import Headers from twisted.logger import globalLogPublisher class MySite(server.Site): pass class PBServerFactory(pb.PBServerFactory): """ A PB server factory which keeps track of the most recent protocol it created. @ivar proto: L{None} or the L{Broker} instance most recently returned from C{buildProtocol}. """ proto = None def buildProtocol(self, addr): self.proto = pb.PBServerFactory.buildProtocol(self, addr) return self.proto class ArbitraryError(Exception): """ An exception for this test. """ class DistribTests(unittest.TestCase): port1 = None port2 = None sub = None f1 = None def tearDown(self): """ Clean up all the event sources left behind by either directly by test methods or indirectly via some distrib API. """ dl = [defer.Deferred(), defer.Deferred()] if self.f1 is not None and self.f1.proto is not None: self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None)) else: dl[0].callback(None) if self.sub is not None and self.sub.publisher is not None: self.sub.publisher.broker.notifyOnDisconnect( lambda: dl[1].callback(None)) self.sub.publisher.broker.transport.loseConnection() else: dl[1].callback(None) if self.port1 is not None: dl.append(self.port1.stopListening()) if self.port2 is not None: dl.append(self.port2.stopListening()) return defer.gatherResults(dl) def testDistrib(self): # site1 is the publisher r1 = resource.Resource() r1.putChild(b"there", static.Data(b"root", "text/plain")) site1 = server.Site(r1) self.f1 = PBServerFactory(distrib.ResourcePublisher(site1)) self.port1 = reactor.listenTCP(0, self.f1) self.sub = distrib.ResourceSubscription("127.0.0.1", self.port1.getHost().port) r2 = resource.Resource() r2.putChild(b"here", self.sub) f2 = MySite(r2) self.port2 = reactor.listenTCP(0, f2) agent = client.Agent(reactor) url = "http://127.0.0.1:{}/here/there".format( self.port2.getHost().port) url = url.encode("ascii") d = agent.request(b"GET", url) d.addCallback(client.readBody) d.addCallback(self.assertEqual, b'root') return d def _setupDistribServer(self, child): """ Set up a resource on a distrib site using L{ResourcePublisher}. @param child: The resource to publish using distrib. @return: A tuple consisting of the host and port on which to contact the created site. 
""" distribRoot = resource.Resource() distribRoot.putChild(b"child", child) distribSite = server.Site(distribRoot) self.f1 = distribFactory = PBServerFactory( distrib.ResourcePublisher(distribSite)) distribPort = reactor.listenTCP( 0, distribFactory, interface="127.0.0.1") self.addCleanup(distribPort.stopListening) addr = distribPort.getHost() self.sub = mainRoot = distrib.ResourceSubscription( addr.host, addr.port) mainSite = server.Site(mainRoot) mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1") self.addCleanup(mainPort.stopListening) mainAddr = mainPort.getHost() return mainPort, mainAddr def _requestTest(self, child, **kwargs): """ Set up a resource on a distrib site using L{ResourcePublisher} and then retrieve it from a L{ResourceSubscription} via an HTTP client. @param child: The resource to publish using distrib. @param **kwargs: Extra keyword arguments to pass to L{Agent.request} when requesting the resource. @return: A L{Deferred} which fires with the result of the request. """ mainPort, mainAddr = self._setupDistribServer(child) agent = client.Agent(reactor) url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port) url = url.encode("ascii") d = agent.request(b"GET", url, **kwargs) d.addCallback(client.readBody) return d def _requestAgentTest(self, child, **kwargs): """ Set up a resource on a distrib site using L{ResourcePublisher} and then retrieve it from a L{ResourceSubscription} via an HTTP client. @param child: The resource to publish using distrib. @param **kwargs: Extra keyword arguments to pass to L{Agent.request} when requesting the resource. @return: A L{Deferred} which fires with a tuple consisting of a L{twisted.test.proto_helpers.AccumulatingProtocol} containing the body of the response and an L{IResponse} with the response itself. """ mainPort, mainAddr = self._setupDistribServer(child) url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port) url = url.encode("ascii") d = client.Agent(reactor).request(b"GET", url, **kwargs) def cbCollectBody(response): protocol = proto_helpers.AccumulatingProtocol() response.deliverBody(protocol) d = protocol.closedDeferred = defer.Deferred() d.addCallback(lambda _: (protocol, response)) return d d.addCallback(cbCollectBody) return d def test_requestHeaders(self): """ The request headers are available on the request object passed to a distributed resource's C{render} method. """ requestHeaders = {} logObserver = proto_helpers.EventLoggingObserver() globalLogPublisher.addObserver(logObserver) req = [None] class ReportRequestHeaders(resource.Resource): def render(self, request): req[0] = request requestHeaders.update(dict( request.requestHeaders.getAllRawHeaders())) return b"" def check_logs(): msgs = [e["log_format"] for e in logObserver] self.assertIn('connected to publisher', msgs) self.assertIn( "could not connect to distributed web service: {msg}", msgs ) self.assertIn(req[0], msgs) globalLogPublisher.removeObserver(logObserver) request = self._requestTest( ReportRequestHeaders(), headers=Headers({'foo': ['bar']})) def cbRequested(result): self.f1.proto.notifyOnDisconnect(check_logs) self.assertEqual(requestHeaders[b'Foo'], [b'bar']) request.addCallback(cbRequested) return request def test_requestResponseCode(self): """ The response code can be set by the request object passed to a distributed resource's C{render} method. 
""" class SetResponseCode(resource.Resource): def render(self, request): request.setResponseCode(200) return "" request = self._requestAgentTest(SetResponseCode()) def cbRequested(result): self.assertEqual(result[0].data, b"") self.assertEqual(result[1].code, 200) self.assertEqual(result[1].phrase, b"OK") request.addCallback(cbRequested) return request def test_requestResponseCodeMessage(self): """ The response code and message can be set by the request object passed to a distributed resource's C{render} method. """ class SetResponseCode(resource.Resource): def render(self, request): request.setResponseCode(200, b"some-message") return "" request = self._requestAgentTest(SetResponseCode()) def cbRequested(result): self.assertEqual(result[0].data, b"") self.assertEqual(result[1].code, 200) self.assertEqual(result[1].phrase, b"some-message") request.addCallback(cbRequested) return request def test_largeWrite(self): """ If a string longer than the Banana size limit is passed to the L{distrib.Request} passed to the remote resource, it is broken into smaller strings to be transported over the PB connection. """ class LargeWrite(resource.Resource): def render(self, request): request.write(b'x' * SIZE_LIMIT + b'y') request.finish() return server.NOT_DONE_YET request = self._requestTest(LargeWrite()) request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y') return request def test_largeReturn(self): """ Like L{test_largeWrite}, but for the case where C{render} returns a long string rather than explicitly passing it to L{Request.write}. """ class LargeReturn(resource.Resource): def render(self, request): return b'x' * SIZE_LIMIT + b'y' request = self._requestTest(LargeReturn()) request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y') return request def test_connectionLost(self): """ If there is an error issuing the request to the remote publisher, an error response is returned. """ # Using pb.Root as a publisher will cause request calls to fail with an # error every time. Just what we want to test. self.f1 = serverFactory = PBServerFactory(pb.Root()) self.port1 = serverPort = reactor.listenTCP(0, serverFactory) self.sub = subscription = distrib.ResourceSubscription( "127.0.0.1", serverPort.getHost().port) request = DummyRequest([b'']) d = _render(subscription, request) def cbRendered(ignored): self.assertEqual(request.responseCode, 500) # This is the error we caused the request to fail with. It should # have been logged. errors = self.flushLoggedErrors(pb.NoSuchMethod) self.assertEqual(len(errors), 1) # The error page is rendered as HTML. expected = [ b'', b'<html>', b' <head><title>500 - Server Connection Lost</title></head>', b' <body>', b' <h1>Server Connection Lost</h1>', b' <p>Connection to distributed server lost:' b'<pre>' b'[Failure instance: Traceback from remote host -- ' b'twisted.spread.flavors.NoSuchMethod: ' b'No such method: remote_request', b']</pre></p>', b' </body>', b'</html>', b'' ] self.assertEqual([b'\n'.join(expected)], request.written) d.addCallback(cbRendered) return d def test_logFailed(self): """ When a request fails, the string form of the failure is logged. 
""" logObserver = proto_helpers.EventLoggingObserver.createWithCleanup( self, globalLogPublisher ) f = failure.Failure(ArbitraryError()) request = DummyRequest([b'']) issue = distrib.Issue(request) issue.failed(f) self.assertEquals(1, len(logObserver)) self.assertIn( "Failure instance", logObserver[0]["log_format"] ) def test_requestFail(self): """ When L{twisted.web.distrib.Request}'s fail is called, the failure is logged. """ logObserver = proto_helpers.EventLoggingObserver.createWithCleanup( self, globalLogPublisher ) err = ArbitraryError() f = failure.Failure(err) req = distrib.Request(DummyChannel()) req.fail(f) self.flushLoggedErrors(ArbitraryError) self.assertEquals(1, len(logObserver)) self.assertIs(logObserver[0]["log_failure"], f) class _PasswordDatabase: def __init__(self, users): self._users = users def getpwall(self): return iter(self._users) def getpwnam(self, username): for user in self._users: if user[0] == username: return user raise KeyError() class UserDirectoryTests(unittest.TestCase): """ Tests for L{UserDirectory}, a resource for listing all user resources available on a system. """ def setUp(self): self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh') self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh') self.database = _PasswordDatabase([self.alice, self.bob]) self.directory = distrib.UserDirectory(self.database) def test_interface(self): """ L{UserDirectory} instances provide L{resource.IResource}. """ self.assertTrue(verifyObject(resource.IResource, self.directory)) def _404Test(self, name): """ Verify that requesting the C{name} child of C{self.directory} results in a 404 response. """ request = DummyRequest([name]) result = self.directory.getChild(name, request) d = _render(result, request) def cbRendered(ignored): self.assertEqual(request.responseCode, 404) d.addCallback(cbRendered) return d def test_getInvalidUser(self): """ L{UserDirectory.getChild} returns a resource which renders a 404 response when passed a string which does not correspond to any known user. """ return self._404Test('carol') def test_getUserWithoutResource(self): """ L{UserDirectory.getChild} returns a resource which renders a 404 response when passed a string which corresponds to a known user who has neither a user directory nor a user distrib socket. """ return self._404Test('alice') def test_getPublicHTMLChild(self): """ L{UserDirectory.getChild} returns a L{static.File} instance when passed the name of a user with a home directory containing a I{public_html} directory. """ home = filepath.FilePath(self.bob[-2]) public_html = home.child('public_html') public_html.makedirs() request = DummyRequest(['bob']) result = self.directory.getChild('bob', request) self.assertIsInstance(result, static.File) self.assertEqual(result.path, public_html.path) def test_getDistribChild(self): """ L{UserDirectory.getChild} returns a L{ResourceSubscription} instance when passed the name of a user suffixed with C{".twistd"} who has a home directory containing a I{.twistd-web-pb} socket. """ home = filepath.FilePath(self.bob[-2]) home.makedirs() web = home.child('.twistd-web-pb') request = DummyRequest(['bob']) result = self.directory.getChild('bob.twistd', request) self.assertIsInstance(result, distrib.ResourceSubscription) self.assertEqual(result.host, 'unix') self.assertEqual(abspath(result.port), web.path) def test_invalidMethod(self): """ L{UserDirectory.render} raises L{UnsupportedMethod} in response to a non-I{GET} request. 
""" request = DummyRequest(['']) request.method = 'POST' self.assertRaises( server.UnsupportedMethod, self.directory.render, request) def test_render(self): """ L{UserDirectory} renders a list of links to available user content in response to a I{GET} request. """ public_html = filepath.FilePath(self.alice[-2]).child('public_html') public_html.makedirs() web = filepath.FilePath(self.bob[-2]) web.makedirs() # This really only works if it's a unix socket, but the implementation # doesn't currently check for that. It probably should someday, and # then skip users with non-sockets. web.child('.twistd-web-pb').setContent(b"") request = DummyRequest(['']) result = _render(self.directory, request) def cbRendered(ignored): document = parseString(b''.join(request.written)) # Each user should have an li with a link to their page. [alice, bob] = document.getElementsByTagName('li') self.assertEqual(alice.firstChild.tagName, 'a') self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/') self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)') self.assertEqual(bob.firstChild.tagName, 'a') self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/') self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)') result.addCallback(cbRendered) return result def test_passwordDatabase(self): """ If L{UserDirectory} is instantiated with no arguments, it uses the L{pwd} module as its password database. """ directory = distrib.UserDirectory() self.assertIdentical(directory._pwd, pwd) if pwd is None: test_passwordDatabase.skip = "pwd module required"
Path: stackoverflow/venv/lib/python3.6/site-packages/twisted/web/test/test_distrib.py
Content length: 18,288
Comment text length: 4,071 | Language: en | Score: 0.815905
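The test file above exercises twisted.web.distrib end to end. As a reading aid, the sketch below shows the publisher/subscriber wiring those tests assume: one process exposes a Site over PB with ResourcePublisher, another mounts it locally through ResourceSubscription. It is a minimal illustration, not code from the test module; the port numbers, the static.Data payloads, and the URL layout are assumptions.

# Minimal sketch of the distrib wiring (illustrative; not from test_distrib.py).
from twisted.internet import reactor
from twisted.spread.pb import PBServerFactory
from twisted.web import distrib, server, static

# Publishing side: expose an existing Site over a PB connection.
published_site = server.Site(static.Data(b"hello from the publisher", "text/plain"))
publisher = distrib.ResourcePublisher(published_site)
reactor.listenTCP(8789, PBServerFactory(publisher))  # port chosen for the example

# Subscribing side: mount the remote resource inside a local Site.
subscription = distrib.ResourceSubscription("127.0.0.1", 8789)
local_root = static.Data(b"local root", "text/plain")
local_root.putChild(b"remote", subscription)  # served at /remote on the local site
reactor.listenTCP(8080, server.Site(local_root))

reactor.run()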
import pathlib

from setuptools import setup

here = pathlib.Path(__file__).parent.resolve()

# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")

setup(
    name="MCsniperPY",
    version="0.20.6",
    description="Minecraft name sniper written in Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/MCsniperPY/MCsniperPY",
    author="Kqzz",
    license="MIT",
    packages=["mcsniperpy", "mcsniperpy.util", "mcsniperpy.util.classes"],
    install_requires=["typer", "aiohttp", "colorama", "bs4"],
    entry_points={"console_scripts": ["mcsniperpy=mcsniperpy.cli:cli"]},
    python_requires=">=3.8",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",  # Again, pick a license
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
)
Path: setup.py
Content length: 1,012
Comment text length: 67 | Language: en | Score: 0.839625
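As a usage note for the setup script above: the console_scripts entry point maps the mcsniperpy command to mcsniperpy.cli:cli. The sketch below, which is illustrative rather than part of the project, resolves that same entry point programmatically after the package has been installed (for example with pip install .); everything beyond the names declared in setup() is an assumption.

# Hedged sketch: locate and load the "mcsniperpy" console_scripts entry point.
from importlib.metadata import entry_points

eps = entry_points()
# Python 3.10+ exposes .select(); older versions return a dict keyed by group.
console = eps.select(group="console_scripts") if hasattr(eps, "select") else eps.get("console_scripts", [])
for ep in console:
    if ep.name == "mcsniperpy":
        cli = ep.load()  # the same callable the "mcsniperpy" command runs
        print("Found entry point:", ep.value)
        # cli() would start the Typer-based CLI; not invoked here.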
'''
This module hooks fast.ai Learners to Weights & Biases through a callback.
Requested logged data can be configured through the callback constructor.

Examples:
    WandbCallback can be used when initializing the Learner::

    ```
    from wandb.fastai import WandbCallback
    [...]
    learn = Learner(data, ..., callback_fns=WandbCallback)
    learn.fit(epochs)
    ```

    Custom parameters can be given using functools.partial::

    ```
    from wandb.fastai import WandbCallback
    from functools import partial
    [...]
    learn = Learner(data, ..., callback_fns=partial(WandbCallback, ...))
    learn.fit(epochs)
    ```

    Finally, it is possible to use WandbCallback only when starting training.
    In this case it must be instantiated::

    ```
    learn.fit(..., callbacks=WandbCallback(learn))
    ```

    or, with custom parameters::

    ```
    learn.fit(..., callbacks=WandbCallback(learn, ...))
    ```
'''
import wandb
import fastai
from fastai.callbacks import TrackerCallback
from pathlib import Path
import random

try:
    import matplotlib
    matplotlib.use('Agg')  # non-interactive backend (avoid tkinter issues)
    import matplotlib.pyplot as plt
except:
    print('Warning: matplotlib required if logging sample image predictions')


class WandbCallback(TrackerCallback):
    """
    Automatically saves model topology, losses & metrics.
    Optionally logs weights, gradients, sample predictions and best trained model.

    Args:
        learn (fastai.basic_train.Learner): the fast.ai learner to hook.
        log (str): "gradients", "parameters", "all", or None. Losses & metrics are always logged.
        save_model (bool): save model at the end of each epoch. It will also load best model at the end of training.
        monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.
        mode (str): "auto", "min" or "max" to compare "monitor" values and define best model.
        input_type (str): "images" or None. Used to display sample predictions.
        validation_data (list): data used for sample predictions if input_type is set.
        predictions (int): number of predictions to make if input_type is set and validation_data is None.
        seed (int): initialize random generator for sample predictions if input_type is set and validation_data is None.
    """

    # Record if watch has been called previously (even in another instance)
    _watch_called = False

    def __init__(self,
                 learn,
                 log="gradients",
                 save_model=True,
                 monitor=None,
                 mode='auto',
                 input_type=None,
                 validation_data=None,
                 predictions=36,
                 seed=12345):

        # Check if wandb.init has been called
        if wandb.run is None:
            raise ValueError(
                'You must call wandb.init() before WandbCallback()')

        # Adapted from fast.ai "SaveModelCallback"
        if monitor is None:
            # use default TrackerCallback monitor value
            super().__init__(learn, mode=mode)
        else:
            super().__init__(learn, monitor=monitor, mode=mode)
        self.save_model = save_model
        self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'

        self.log = log
        self.input_type = input_type
        self.best = None

        # Select items for sample predictions to see evolution along training
        self.validation_data = validation_data
        if input_type and not self.validation_data:
            wandbRandom = random.Random(seed)  # For repeatability
            predictions = min(predictions, len(learn.data.valid_ds))
            indices = wandbRandom.sample(range(len(learn.data.valid_ds)),
                                         predictions)
            self.validation_data = [learn.data.valid_ds[i] for i in indices]

    def on_train_begin(self, **kwargs):
        "Call watch method to log model topology, gradients & weights"

        # Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback"
        super().on_train_begin()

        # Ensure we don't call "watch" multiple times
        if not WandbCallback._watch_called:
            WandbCallback._watch_called = True

            # Logs model topology and optionally gradients and weights
            wandb.watch(self.learn.model, log=self.log)

    def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
        "Logs training loss, validation loss and custom metrics & log prediction samples & save model"

        if self.save_model:
            # Adapted from fast.ai "SaveModelCallback"
            current = self.get_monitor_value()
            if current is not None and self.operator(current, self.best):
                print(
                    'Better model found at epoch {} with {} value: {}.'.format(
                        epoch, self.monitor, current))
                self.best = current

                # Save within wandb folder
                with self.model_path.open('wb') as model_file:
                    self.learn.save(model_file)

        # Log sample predictions if learn.predict is available
        if self.validation_data:
            try:
                self._wandb_log_predictions()
            except FastaiError as e:
                wandb.termwarn(e.message)
                self.validation_data = None  # prevent from trying again on next loop
            except Exception as e:
                wandb.termwarn("Unable to log prediction samples.\n{}".format(e))
                self.validation_data = None  # prevent from trying again on next loop

        # Log losses & metrics
        # Adapted from fast.ai "CSVLogger"
        logs = {
            name: stat
            for name, stat in list(
                zip(self.learn.recorder.names, [epoch, smooth_loss] +
                    last_metrics))
        }
        wandb.log(logs)

    def on_train_end(self, **kwargs):
        "Load the best model."

        if self.save_model:
            # Adapted from fast.ai "SaveModelCallback"
            if self.model_path.is_file():
                with self.model_path.open('rb') as model_file:
                    self.learn.load(model_file, purge=False)
                    print('Loaded best saved model from {}'.format(
                        self.model_path))

    def _wandb_log_predictions(self):
        "Log prediction samples"

        pred_log = []

        for x, y in self.validation_data:
            try:
                pred = self.learn.predict(x)
            except:
                raise FastaiError(
                    'Unable to run "predict" method from Learner to log prediction samples.')

            # scalar -> likely to be a category
            if not pred[1].shape:
                pred_log.append(
                    wandb.Image(
                        x.data,
                        caption='Ground Truth: {}\nPrediction: {}'.format(
                            y, pred[0])))

            # most vision datasets have a "show" function we can use
            elif hasattr(x, "show"):
                # log input data
                pred_log.append(
                    wandb.Image(x.data, caption='Input data', grouping=3))

                # log label and prediction
                for im, capt in ((pred[0], "Prediction"), (y, "Ground Truth")):
                    # Resize plot to image resolution
                    # from https://stackoverflow.com/a/13714915
                    my_dpi = 100
                    fig = plt.figure(frameon=False, dpi=my_dpi)
                    h, w = x.size
                    fig.set_size_inches(w / my_dpi, h / my_dpi)
                    ax = plt.Axes(fig, [0., 0., 1., 1.])
                    ax.set_axis_off()
                    fig.add_axes(ax)

                    # Superpose label or prediction to input image
                    x.show(ax=ax, y=im)
                    pred_log.append(wandb.Image(fig, caption=capt))
                    plt.close(fig)

            # likely to be an image
            elif hasattr(y, "shape") and ((len(y.shape) == 2) or
                                          (len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):
                pred_log.extend([
                    wandb.Image(x.data, caption='Input data', grouping=3),
                    wandb.Image(pred[0].data, caption='Prediction'),
                    wandb.Image(y.data, caption='Ground Truth')
                ])

            # we just log input data
            else:
                pred_log.append(wandb.Image(x.data, caption='Input data'))

        wandb.log({"Prediction Samples": pred_log}, commit=False)


class FastaiError(wandb.Error):
    pass
Path: wandb/fastai/__init__.py
Content length: 8,841
Comment text length: 3,254 | Language: en | Score: 0.751566
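Tying the documented pieces together, here is a hedged, minimal training sketch that uses the callback above. It assumes fastai v1, a configured wandb account, and placeholder choices for the dataset, model, and project name; it is not taken from the module itself.

# Minimal sketch: wandb.init() must run before the callback is constructed,
# and functools.partial passes custom arguments (here, image sample logging).
from functools import partial

import wandb
from fastai.vision import ImageDataBunch, cnn_learner, models, untar_data, URLs
from wandb.fastai import WandbCallback

wandb.init(project="fastai-demo")  # project name is an assumption

path = untar_data(URLs.MNIST_SAMPLE)          # small sample dataset, illustrative
data = ImageDataBunch.from_folder(path)
learn = cnn_learner(
    data,
    models.resnet18,
    callback_fns=partial(WandbCallback, input_type="images", predictions=10),
)
learn.fit_one_cycle(1)  # losses, metrics, and sample predictions are logged to W&B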
import os
from os.path import join as pjoin

import numpy as np
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from Cython.Distutils import build_ext

from setup_cuda import cuda_setup

mpi_compile_args = os.popen("mpic++ --showme:compile").read().strip().split(' ')
mpi_link_args = os.popen("mpic++ --showme:link").read().strip().split(' ')


def find_in_path(name, path):
    """Find a file in a search path"""
    # Adapted from http://code.activestate.com/recipes/52224
    for dir in path.split(os.pathsep):
        binpath = pjoin(dir, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


try:
    numpy_include = np.get_include()
except AttributeError:
    numpy_include = np.get_numpy_include()

nvcc = find_in_path('nvcc', os.environ['PATH'])

if isinstance(nvcc, str):
    print('CUDA')
    # setup(name='PackageName',
    #       author='Nina Herrmann',
    #       version='1.0',
    #       description='This is a package for Muesli',
    #       ext_modules=cythonize(cuda_setup.get_module()),
    #       cmdclass={'build_ext': cuda_setup.custom_build_ext()}
    #       )
else:
    module = Extension('_da',
                       sources=['da.cxx', 'da_wrap.cxx'],
                       include_dirs=[np.get_include(), 'src'],
                       library_dirs=['/usr/include/boost/'],
                       language="c++",
                       swig_opts=['-c++'],
                       libraries=['/usr/include/boost/chrono'],
                       extra_compile_args=(["-fopenmp"] + mpi_compile_args),
                       extra_link_args=(["-fopenmp"] + mpi_link_args))

    setup(name='da',
          author='Nina Herrmann',
          version='1.0',
          description='This is a package for Muesli',
          ext_modules=[module],
          py_modules=["da"])
Path: swig_muesli/muesli/da/setup_da.py
Content length: 1,900
Comment text length: 333 | Language: en | Score: 0.397683
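For completeness, a hedged note on driving the script above: a SWIG/MPI extension like this is normally compiled in place with the standard setuptools build_ext command. The snippet below simply invokes that command from Python; the exact invocation is an assumption about the project's workflow, not something stated in setup_da.py.

# Hedged sketch: equivalent to running "python setup_da.py build_ext --inplace"
# from the directory containing the script.
import subprocess
import sys

subprocess.run([sys.executable, "setup_da.py", "build_ext", "--inplace"], check=True)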
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import numpy as np
from sklearn.decomposition import PCA

from reco_utils.dataset.download_utils import maybe_download
from IPython import embed


def length_normalize(matrix):
    """Length normalize the matrix

    Args:
        matrix (np.ndarray): Input matrix that needs to be normalized

    Returns:
        Normalized matrix
    """
    norms = np.sqrt(np.sum(matrix**2, axis=1))
    norms[norms == 0] = 1
    return matrix / norms[:, np.newaxis]


def mean_center(matrix):
    """Performs mean centering across axis 0

    Args:
        matrix (np.ndarray): Input matrix that needs to be mean centered
    """
    avg = np.mean(matrix, axis=0)
    matrix -= avg


def reduce_dims(matrix, target_dim):
    """Reduce dimensionality of the data using PCA.

    Args:
        matrix (np.ndarray): Matrix of the form (n_samples, n_features)
        target_dim (uint): Dimension to which n_features should be reduced.
    """
    model = PCA(n_components=target_dim)
    model.fit(matrix)
    return model.transform(matrix)
Path: reco_utils/recommender/geoimc/geoimc_utils.py
Content length: 1,124
Comment text length: 532 | Language: en | Score: 0.782968
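To make the helpers above concrete, here is a small usage sketch on random data. It assumes reco_utils is importable from the path shown above; the array shapes and the target dimension are illustrative.

# Usage sketch for geoimc_utils (not part of the module itself).
import numpy as np

from reco_utils.recommender.geoimc.geoimc_utils import (
    length_normalize,
    mean_center,
    reduce_dims,
)

X = np.random.rand(100, 20)              # (n_samples, n_features)

Xn = length_normalize(X)                 # rows now have unit L2 norm
print(np.allclose(np.linalg.norm(Xn, axis=1), 1.0))   # True

mean_center(Xn)                          # in-place: column means become ~0
print(np.allclose(Xn.mean(axis=0), 0.0))               # True

X_low = reduce_dims(Xn, target_dim=5)    # PCA projection to 5 components
print(X_low.shape)                       # (100, 5)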