{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) 
) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const 
text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); 
replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \"\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_hyperlinks\",\n \"has_removed_files\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 1,\n \"issue_text_score\": 1,\n \"test_score\": 2\n },\n \"num_modified_files\": 9\n}"},"version":{"kind":"string","value":"1.0"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\"\n 
],\n \"pre_install\": [\n \"apt-get update\",\n \"apt-get install -y gcc\"\n ],\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"requirements.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"attrs==25.3.0\nAutomat==24.8.1\ncffi==1.17.1\nconstantly==23.10.4\ncryptography==44.0.2\ncssselect==1.3.0\nexceptiongroup==1.2.2\nhyperlink==21.0.0\nidna==3.10\nincremental==24.7.2\niniconfig==2.1.0\njmespath==1.0.1\nlxml==5.3.1\npackaging==24.2\nparsel==1.10.0\npluggy==1.5.0\npyasn1==0.6.1\npyasn1_modules==0.4.2\npycparser==2.22\nPyDispatcher==2.0.7\npyOpenSSL==25.0.0\npytest==8.3.5\nqueuelib==1.7.0\n-e git+https://github.com/scrapy/scrapy.git@7d24df37380cd5a5b7394cd2534e240bd2eff0ca#egg=Scrapy\nservice-identity==24.2.0\nsix==1.17.0\ntomli==2.2.1\nTwisted==24.11.0\ntyping_extensions==4.13.0\nw3lib==2.3.1\nzope.interface==7.2\n"},"environment":{"kind":"string","value":"name: scrapy\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - attrs==25.3.0\n - automat==24.8.1\n - cffi==1.17.1\n - constantly==23.10.4\n - cryptography==44.0.2\n - cssselect==1.3.0\n - exceptiongroup==1.2.2\n - hyperlink==21.0.0\n - idna==3.10\n - incremental==24.7.2\n - iniconfig==2.1.0\n - jmespath==1.0.1\n 
- lxml==5.3.1\n - packaging==24.2\n - parsel==1.10.0\n - pluggy==1.5.0\n - pyasn1==0.6.1\n - pyasn1-modules==0.4.2\n - pycparser==2.22\n - pydispatcher==2.0.7\n - pyopenssl==25.0.0\n - pytest==8.3.5\n - queuelib==1.7.0\n - service-identity==24.2.0\n - six==1.17.0\n - tomli==2.2.1\n - twisted==24.11.0\n - typing-extensions==4.13.0\n - w3lib==2.3.1\n - zope-interface==7.2\nprefix: /opt/conda/envs/scrapy\n"},"FAIL_TO_PASS":{"kind":"list like","value":["tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_immediate_error"],"string":"[\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_immediate_error\"\n]"},"FAIL_TO_FAIL":{"kind":"list like","value":["tests/test_engine.py::EngineTest::test_crawler","tests/test_http_response.py::TextResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM","tests/test_http_response.py::TextResponseTest::test_selector","tests/test_http_response.py::TextResponseTest::test_selector_shortcuts","tests/test_http_response.py::HtmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM","tests/test_http_response.py::HtmlResponseTest::test_selector","tests/test_http_response.py::HtmlResponseTest::test_selector_shortcuts","tests/test_http_response.py::XmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM","tests/test_http_response.py::XmlResponseTest::test_selector","tests/test_http_response.py::XmlResponseTest::test_selector_shortcuts"],"string":"[\n \"tests/test_engine.py::EngineTest::test_crawler\",\n \"tests/test_http_response.py::TextResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM\",\n \"tests/test_http_response.py::TextResponseTest::test_selector\",\n \"tests/test_http_response.py::TextResponseTest::test_selector_shortcuts\",\n \"tests/test_http_response.py::HtmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM\",\n \"tests/test_http_response.py::HtmlResponseTest::test_selector\",\n 
\"tests/test_http_response.py::HtmlResponseTest::test_selector_shortcuts\",\n \"tests/test_http_response.py::XmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM\",\n \"tests/test_http_response.py::XmlResponseTest::test_selector\",\n \"tests/test_http_response.py::XmlResponseTest::test_selector_shortcuts\"\n]"},"PASS_TO_PASS":{"kind":"list like","value":["tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_ignore_robotstxt_request","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_empty_response","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_error","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_garbage","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_meta","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_ready_parser","tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_settings","tests/test_engine.py::EngineTest::test_close_downloader","tests/test_engine.py::EngineTest::test_close_engine_spiders_downloader","tests/test_engine.py::EngineTest::test_close_spiders_downloader","tests/test_http_response.py::BaseResponseTest::test_copy","tests/test_http_response.py::BaseResponseTest::test_copy_inherited_classes","tests/test_http_response.py::BaseResponseTest::test_copy_meta","tests/test_http_response.py::BaseResponseTest::test_immutable_attributes","tests/test_http_response.py::BaseResponseTest::test_init","tests/test_http_response.py::BaseResponseTest::test_replace","tests/test_http_response.py::BaseResponseTest::test_urljoin","tests/test_http_response.py::TextResponseTest::test_bom_is_removed_from_body","tests/test_http_response.py::TextResponseTest::test_copy","tests/test_http_response.py::TextResponseTest::test_copy_in
herited_classes","tests/test_http_response.py::TextResponseTest::test_copy_meta","tests/test_http_response.py::TextResponseTest::test_declared_encoding_invalid","tests/test_http_response.py::TextResponseTest::test_encoding","tests/test_http_response.py::TextResponseTest::test_immutable_attributes","tests/test_http_response.py::TextResponseTest::test_init","tests/test_http_response.py::TextResponseTest::test_replace","tests/test_http_response.py::TextResponseTest::test_replace_wrong_encoding","tests/test_http_response.py::TextResponseTest::test_unicode_body","tests/test_http_response.py::TextResponseTest::test_unicode_url","tests/test_http_response.py::TextResponseTest::test_urljoin","tests/test_http_response.py::TextResponseTest::test_urljoin_with_base_url","tests/test_http_response.py::TextResponseTest::test_utf16","tests/test_http_response.py::HtmlResponseTest::test_bom_is_removed_from_body","tests/test_http_response.py::HtmlResponseTest::test_copy","tests/test_http_response.py::HtmlResponseTest::test_copy_inherited_classes","tests/test_http_response.py::HtmlResponseTest::test_copy_meta","tests/test_http_response.py::HtmlResponseTest::test_declared_encoding_invalid","tests/test_http_response.py::HtmlResponseTest::test_encoding","tests/test_http_response.py::HtmlResponseTest::test_html5_meta_charset","tests/test_http_response.py::HtmlResponseTest::test_html_encoding","tests/test_http_response.py::HtmlResponseTest::test_immutable_attributes","tests/test_http_response.py::HtmlResponseTest::test_init","tests/test_http_response.py::HtmlResponseTest::test_replace","tests/test_http_response.py::HtmlResponseTest::test_replace_wrong_encoding","tests/test_http_response.py::HtmlResponseTest::test_unicode_body","tests/test_http_response.py::HtmlResponseTest::test_unicode_url","tests/test_http_response.py::HtmlResponseTest::test_urljoin","tests/test_http_response.py::HtmlResponseTest::test_urljoin_with_base_url","tests/test_http_response.py::HtmlResponseTest::test_utf16","test
s/test_http_response.py::XmlResponseTest::test_bom_is_removed_from_body","tests/test_http_response.py::XmlResponseTest::test_copy","tests/test_http_response.py::XmlResponseTest::test_copy_inherited_classes","tests/test_http_response.py::XmlResponseTest::test_copy_meta","tests/test_http_response.py::XmlResponseTest::test_declared_encoding_invalid","tests/test_http_response.py::XmlResponseTest::test_encoding","tests/test_http_response.py::XmlResponseTest::test_immutable_attributes","tests/test_http_response.py::XmlResponseTest::test_init","tests/test_http_response.py::XmlResponseTest::test_replace","tests/test_http_response.py::XmlResponseTest::test_replace_encoding","tests/test_http_response.py::XmlResponseTest::test_replace_wrong_encoding","tests/test_http_response.py::XmlResponseTest::test_unicode_body","tests/test_http_response.py::XmlResponseTest::test_unicode_url","tests/test_http_response.py::XmlResponseTest::test_urljoin","tests/test_http_response.py::XmlResponseTest::test_urljoin_with_base_url","tests/test_http_response.py::XmlResponseTest::test_utf16","tests/test_http_response.py::XmlResponseTest::test_xml_encoding"],"string":"[\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_ignore_robotstxt_request\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_empty_response\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_error\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_garbage\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_meta\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_ready_parser\",\n \"tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_settings\",\n 
\"tests/test_engine.py::EngineTest::test_close_downloader\",\n \"tests/test_engine.py::EngineTest::test_close_engine_spiders_downloader\",\n \"tests/test_engine.py::EngineTest::test_close_spiders_downloader\",\n \"tests/test_http_response.py::BaseResponseTest::test_copy\",\n \"tests/test_http_response.py::BaseResponseTest::test_copy_inherited_classes\",\n \"tests/test_http_response.py::BaseResponseTest::test_copy_meta\",\n \"tests/test_http_response.py::BaseResponseTest::test_immutable_attributes\",\n \"tests/test_http_response.py::BaseResponseTest::test_init\",\n \"tests/test_http_response.py::BaseResponseTest::test_replace\",\n \"tests/test_http_response.py::BaseResponseTest::test_urljoin\",\n \"tests/test_http_response.py::TextResponseTest::test_bom_is_removed_from_body\",\n \"tests/test_http_response.py::TextResponseTest::test_copy\",\n \"tests/test_http_response.py::TextResponseTest::test_copy_inherited_classes\",\n \"tests/test_http_response.py::TextResponseTest::test_copy_meta\",\n \"tests/test_http_response.py::TextResponseTest::test_declared_encoding_invalid\",\n \"tests/test_http_response.py::TextResponseTest::test_encoding\",\n \"tests/test_http_response.py::TextResponseTest::test_immutable_attributes\",\n \"tests/test_http_response.py::TextResponseTest::test_init\",\n \"tests/test_http_response.py::TextResponseTest::test_replace\",\n \"tests/test_http_response.py::TextResponseTest::test_replace_wrong_encoding\",\n \"tests/test_http_response.py::TextResponseTest::test_unicode_body\",\n \"tests/test_http_response.py::TextResponseTest::test_unicode_url\",\n \"tests/test_http_response.py::TextResponseTest::test_urljoin\",\n \"tests/test_http_response.py::TextResponseTest::test_urljoin_with_base_url\",\n \"tests/test_http_response.py::TextResponseTest::test_utf16\",\n \"tests/test_http_response.py::HtmlResponseTest::test_bom_is_removed_from_body\",\n \"tests/test_http_response.py::HtmlResponseTest::test_copy\",\n 
\"tests/test_http_response.py::HtmlResponseTest::test_copy_inherited_classes\",\n \"tests/test_http_response.py::HtmlResponseTest::test_copy_meta\",\n \"tests/test_http_response.py::HtmlResponseTest::test_declared_encoding_invalid\",\n \"tests/test_http_response.py::HtmlResponseTest::test_encoding\",\n \"tests/test_http_response.py::HtmlResponseTest::test_html5_meta_charset\",\n \"tests/test_http_response.py::HtmlResponseTest::test_html_encoding\",\n \"tests/test_http_response.py::HtmlResponseTest::test_immutable_attributes\",\n \"tests/test_http_response.py::HtmlResponseTest::test_init\",\n \"tests/test_http_response.py::HtmlResponseTest::test_replace\",\n \"tests/test_http_response.py::HtmlResponseTest::test_replace_wrong_encoding\",\n \"tests/test_http_response.py::HtmlResponseTest::test_unicode_body\",\n \"tests/test_http_response.py::HtmlResponseTest::test_unicode_url\",\n \"tests/test_http_response.py::HtmlResponseTest::test_urljoin\",\n \"tests/test_http_response.py::HtmlResponseTest::test_urljoin_with_base_url\",\n \"tests/test_http_response.py::HtmlResponseTest::test_utf16\",\n \"tests/test_http_response.py::XmlResponseTest::test_bom_is_removed_from_body\",\n \"tests/test_http_response.py::XmlResponseTest::test_copy\",\n \"tests/test_http_response.py::XmlResponseTest::test_copy_inherited_classes\",\n \"tests/test_http_response.py::XmlResponseTest::test_copy_meta\",\n \"tests/test_http_response.py::XmlResponseTest::test_declared_encoding_invalid\",\n \"tests/test_http_response.py::XmlResponseTest::test_encoding\",\n \"tests/test_http_response.py::XmlResponseTest::test_immutable_attributes\",\n \"tests/test_http_response.py::XmlResponseTest::test_init\",\n \"tests/test_http_response.py::XmlResponseTest::test_replace\",\n \"tests/test_http_response.py::XmlResponseTest::test_replace_encoding\",\n \"tests/test_http_response.py::XmlResponseTest::test_replace_wrong_encoding\",\n \"tests/test_http_response.py::XmlResponseTest::test_unicode_body\",\n 
\"tests/test_http_response.py::XmlResponseTest::test_unicode_url\",\n \"tests/test_http_response.py::XmlResponseTest::test_urljoin\",\n \"tests/test_http_response.py::XmlResponseTest::test_urljoin_with_base_url\",\n \"tests/test_http_response.py::XmlResponseTest::test_utf16\",\n \"tests/test_http_response.py::XmlResponseTest::test_xml_encoding\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"BSD 3-Clause \"New\" or \"Revised\" License"},"__index_level_0__":{"kind":"number","value":399,"string":"399"}}},{"rowIdx":399,"cells":{"instance_id":{"kind":"string","value":"abh1nav__gnippy-16"},"base_commit":{"kind":"string","value":"f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0"},"created_at":{"kind":"string","value":"2016-01-28 04:30:48"},"environment_setup_commit":{"kind":"string","value":"f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0"},"hints_text":{"kind":"string","value":""},"patch":{"kind":"string","value":"diff --git a/gnippy/rules.py b/gnippy/rules.py\nindex 0309b5d..f5f1d04 100644\n--- a/gnippy/rules.py\n+++ b/gnippy/rules.py\n@@ -2,6 +2,11 @@\n \n import json\n \n+try:\n+ from urllib.parse import urlparse\n+except:\n+ from urlparse import urlparse\n+\n import requests\n from six import string_types\n \n@@ -82,6 +87,17 @@ def _post(conf, built_rules):\n error_text = \"HTTP Response Code: %s, Text: '%s'\" % (str(r.status_code), r.text)\n raise RuleAddFailedException(error_text)\n \n+def _generate_delete_url(conf):\n+ \"\"\"\n+ Generate the Rules URL for a DELETE request.\n+ \"\"\"\n+ generated_url = _generate_rules_url(conf['url'])\n+ parsed_url = urlparse(generated_url)\n+ query = parsed_url.query\n+ if query != '':\n+ return generated_url.replace(query, query + \"&_method=delete\")\n+ else:\n+ return generated_url + \"?_method=delete\"\n \n def _delete(conf, built_rules):\n \"\"\"\n@@ -99,7 +115,7 @@ def _delete(conf, built_rules):\n built_rules: A single or list of built rules.\n \"\"\"\n 
_check_rules_list(built_rules)\n- rules_url = _generate_rules_url(conf['url']) + \"?_method=delete\"\n+ rules_url = _generate_delete_url(conf)\n delete_data = json.dumps(_generate_post_object(built_rules))\n r = requests.post(rules_url, auth=conf['auth'], data=delete_data)\n if not r.status_code in range(200,300):\ndiff --git a/setup.py b/setup.py\nindex 42ddcbc..d25cd66 100644\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,7 @@\n import os\n import sys\n \n-version = \"0.5.0\"\n+version = \"0.5.1\"\n \n try:\n from setuptools import setup\n"},"problem_statement":{"kind":"string","value":"Rules URL is incorrectly generated if endpoint URL has parameters\nThis does not work if `conf['url']` has params.\r\n\r\n```\r\nrules_url = _generate_rules_url(conf['url']) + \"?_method=delete\"\r\n```\r\n\r\nExample generated URL:\r\n\r\n```\r\nhttps://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?client=2?_method=delete\r\n```"},"repo":{"kind":"string","value":"abh1nav/gnippy"},"test_patch":{"kind":"string","value":"diff --git a/gnippy/test/test_rules.py b/gnippy/test/test_rules.py\nindex 4310376..a79a857 100644\n--- a/gnippy/test/test_rules.py\n+++ b/gnippy/test/test_rules.py\n@@ -258,6 +258,20 @@ class RulesTestCase(unittest.TestCase):\n r = rules.get_rules(config_file_path=test_utils.test_config_path)\n self.assertEqual(1, len(r))\n \n+ def test_generate_delete_url_normal_case(self):\n+ \"\"\" Check if the Delete URL is generated correctly. 
\"\"\"\n+ conf = { 'url': 'https://stream.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod.json' }\n+ url = rules._generate_delete_url(conf)\n+ self.assertEqual('https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?_method=delete',\n+ url)\n+\n+ def test_generate_delete_url_with_query(self):\n+ \"\"\" Account for https://github.com/abh1nav/gnippy/issues/15 \"\"\"\n+ conf = { 'url': 'https://stream.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod.json?client=2' }\n+ url = rules._generate_delete_url(conf)\n+ self.assertEqual('https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?client=2&_method=delete',\n+ url)\n+\n @mock.patch('requests.post', good_delete)\n def test_delete_rules_single(self):\n \"\"\" Delete one rule. \"\"\"\n"},"meta":{"kind":"string","value":"{\n \"commit_name\": \"head_commit\",\n \"failed_lite_validators\": [\n \"has_short_problem_statement\",\n \"has_hyperlinks\",\n \"has_many_modified_files\",\n \"has_many_hunks\"\n ],\n \"has_test_patch\": true,\n \"is_lite\": false,\n \"llm_score\": {\n \"difficulty_score\": 2,\n \"issue_text_score\": 1,\n \"test_score\": 1\n },\n \"num_modified_files\": 2\n}"},"version":{"kind":"string","value":"0.5"},"install_config":{"kind":"string","value":"{\n \"env_vars\": null,\n \"env_yml_path\": null,\n \"install\": \"pip install -e .\",\n \"log_parser\": \"parse_log_pytest\",\n \"no_use_env\": null,\n \"packages\": \"requirements.txt\",\n \"pip_packages\": [\n \"pytest\"\n ],\n \"pre_install\": null,\n \"python\": \"3.9\",\n \"reqs_path\": [\n \"requirements-dev.txt\"\n ],\n \"test_cmd\": \"pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning\"\n}"},"requirements":{"kind":"string","value":"cachetools==5.5.2\nchardet==5.2.0\ncolorama==0.4.6\ndistlib==0.3.9\nexceptiongroup==1.2.2\nfilelock==3.18.0\n-e 
git+https://github.com/abh1nav/gnippy.git@f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0#egg=gnippy\niniconfig==2.1.0\nmock==1.0.1\nnose==1.3.0\npackaging==24.2\nplatformdirs==4.3.7\npluggy==1.5.0\npyproject-api==1.9.0\npytest==8.3.5\nrequests==2.8.1\nsix==1.10.0\ntomli==2.2.1\ntox==4.25.0\ntyping_extensions==4.13.0\nvirtualenv==20.29.3\n"},"environment":{"kind":"string","value":"name: gnippy\nchannels:\n - defaults\n - https://repo.anaconda.com/pkgs/main\n - https://repo.anaconda.com/pkgs/r\n - conda-forge\ndependencies:\n - _libgcc_mutex=0.1=main\n - _openmp_mutex=5.1=1_gnu\n - ca-certificates=2025.2.25=h06a4308_0\n - ld_impl_linux-64=2.40=h12ee557_0\n - libffi=3.4.4=h6a678d5_1\n - libgcc-ng=11.2.0=h1234567_1\n - libgomp=11.2.0=h1234567_1\n - libstdcxx-ng=11.2.0=h1234567_1\n - ncurses=6.4=h6a678d5_0\n - openssl=3.0.16=h5eee18b_0\n - pip=25.0=py39h06a4308_0\n - python=3.9.21=he870216_1\n - readline=8.2=h5eee18b_0\n - setuptools=75.8.0=py39h06a4308_0\n - sqlite=3.45.3=h5eee18b_0\n - tk=8.6.14=h39e8969_0\n - tzdata=2025a=h04d1e81_0\n - wheel=0.45.1=py39h06a4308_0\n - xz=5.6.4=h5eee18b_1\n - zlib=1.2.13=h5eee18b_1\n - pip:\n - cachetools==5.5.2\n - chardet==5.2.0\n - colorama==0.4.6\n - distlib==0.3.9\n - exceptiongroup==1.2.2\n - filelock==3.18.0\n - iniconfig==2.1.0\n - mock==1.0.1\n - nose==1.3.0\n - packaging==24.2\n - platformdirs==4.3.7\n - pluggy==1.5.0\n - pyproject-api==1.9.0\n - pytest==8.3.5\n - requests==2.8.1\n - six==1.10.0\n - tomli==2.2.1\n - tox==4.25.0\n - typing-extensions==4.13.0\n - virtualenv==20.29.3\nprefix: /opt/conda/envs/gnippy\n"},"FAIL_TO_PASS":{"kind":"list like","value":["gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_normal_case","gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_with_query"],"string":"[\n \"gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_normal_case\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_with_query\"\n]"},"FAIL_TO_FAIL":{"kind":"list 
like","value":[],"string":"[]"},"PASS_TO_PASS":{"kind":"list like","value":["gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_no_creds","gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_not_ok","gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_ok","gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_no_creds","gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_not_ok","gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_ok","gnippy/test/test_rules.py::RulesTestCase::test_build_post_object","gnippy/test/test_rules.py::RulesTestCase::test_build_rule_bad_args","gnippy/test/test_rules.py::RulesTestCase::test_build_rule_with_tag","gnippy/test/test_rules.py::RulesTestCase::test_build_rule_without_tag","gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url","gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url_bad","gnippy/test/test_rules.py::RulesTestCase::test_check_many_rules_ok","gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_extra_stuff_in_rule","gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_ok","gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_tag","gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_values","gnippy/test/test_rules.py::RulesTestCase::test_check_rule_tag_none","gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_multiple","gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_single","gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_json","gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_status_code","gnippy/test/test_rules.py::RulesTestCase::test_get_rules_no_rules_field_json","gnippy/test/test_rules.py::RulesTestCase::test_get_rules_requests_get_exception","gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_no_rules","gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_one_rule"],"string":"[\n 
\"gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_no_creds\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_not_ok\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_ok\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_no_creds\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_not_ok\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_ok\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_build_post_object\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_build_rule_bad_args\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_build_rule_with_tag\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_build_rule_without_tag\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url_bad\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_check_many_rules_ok\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_extra_stuff_in_rule\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_ok\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_tag\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_values\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_check_rule_tag_none\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_multiple\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_single\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_json\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_status_code\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_no_rules_field_json\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_requests_get_exception\",\n \"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_no_rules\",\n 
\"gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_one_rule\"\n]"},"PASS_TO_FAIL":{"kind":"list like","value":[],"string":"[]"},"license_name":{"kind":"string","value":"Apache License 2.0"},"__index_level_0__":{"kind":"number","value":400,"string":"400"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":3,"numItemsPerPage":100,"numTotalItems":21336,"offset":300,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjEwMDY1NCwic3ViIjoiL2RhdGFzZXRzL25lYml1cy9TV0UtcmViZW5jaCIsImV4cCI6MTc1NjEwNDI1NCwiaXNzIjoiaHR0cHM6Ly9odWdnaW5nZmFjZS5jbyJ9.um7lqtbA8vcyupGL3GAjyVNobl9E_kpCyRNbkyfbz7kul11osyL-bUzOCEGfwerY2_YQWWNWNISQjOrqtjj4Ag","displayUrls":true},"discussionsStats":{"closed":3,"open":3,"total":6},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[{"views":[{"key":"default/test","displayName":"test","viewName":"test"}],"sql":"-- The SQL console is powered by DuckDB WASM and runs entirely in the browser.\n-- Get started by typing a query or selecting a view from the options below.\nSELECT * FROM test where instance_id = 'oemof__tespy-653'","title":"Filter Test Data by Instance ID","createdAt":"2025-05-30T02:20:23.351Z","slug":"Mlmmb3M","private":false,"justification":"Retrieves all records for a specific instance, providing limited insight into the data structure but no broader analytical value.","viewNames":["test"]}],"user":[]}}">
instance_id
stringlengths
10
57
base_commit
stringlengths
40
40
created_at
stringdate
2014-04-30 14:58:36
2025-04-30 20:14:11
environment_setup_commit
stringlengths
40
40
hints_text
stringlengths
0
273k
patch
stringlengths
251
7.06M
problem_statement
stringlengths
11
52.5k
repo
stringlengths
7
53
test_patch
stringlengths
231
997k
meta
dict
version
stringclasses
864 values
install_config
dict
requirements
stringlengths
93
34.2k
environment
stringlengths
760
20.5k
FAIL_TO_PASS
listlengths
1
9.39k
FAIL_TO_FAIL
listlengths
0
2.69k
PASS_TO_PASS
listlengths
0
7.87k
PASS_TO_FAIL
listlengths
0
192
license_name
stringclasses
56 values
__index_level_0__
int64
0
21.4k
docker__docker-py-854
9ebecb5991303d55fe208114a1de422650c4dcb2
2015-11-16 19:50:27
2f2d50d0c7be5882b150f6ff3bae31d469720e5b
GordonTheTurtle: Please sign your commits following these rules: https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work The easiest way to do this is to amend the last commit: ~~~console $ git clone -b "840-add-exception-utility-method-for-create-host-config" [email protected]:lots0logs/docker-py.git somewhere $ cd somewhere $ git rebase -i HEAD~4 editor opens change each 'pick' to 'edit' save the file and quit $ git commit --amend -s --no-edit $ git rebase --continue # and repeat the amend for each commit $ git push -f ~~~ Ammending updates the existing PR. You **DO NOT** need to open a new one. lots0logs: @shin- @dnephin aanand: LGTM dnephin: LGTM, but I think it needs a rebase. It might be that the gihub "branch protection" is preventing me from merging this because I'm not an owner on this repo. GordonTheTurtle: Please sign your commits following these rules: https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work The easiest way to do this is to amend the last commit: ~~~console $ git clone -b "840-add-exception-utility-method-for-create-host-config" [email protected]:lots0logs/docker-py.git somewhere $ cd somewhere $ git rebase -i HEAD~5 editor opens change each 'pick' to 'edit' save the file and quit $ git commit --amend -s --no-edit $ git rebase --continue # and repeat the amend for each commit $ git push -f ~~~ Ammending updates the existing PR. You **DO NOT** need to open a new one. 
GordonTheTurtle: Please sign your commits following these rules: https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work The easiest way to do this is to amend the last commit: ~~~console $ git clone -b "840-add-exception-utility-method-for-create-host-config" [email protected]:lots0logs/docker-py.git somewhere $ cd somewhere $ git rebase -i HEAD~5 editor opens change each 'pick' to 'edit' save the file and quit $ git commit --amend -s --no-edit $ git rebase --continue # and repeat the amend for each commit $ git push -f ~~~ Ammending updates the existing PR. You **DO NOT** need to open a new one. lots0logs: @dnephin
diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 560ee8e2..9c4bb477 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -236,7 +236,7 @@ def convert_port_bindings(port_bindings): for k, v in six.iteritems(port_bindings): key = str(k) if '/' not in key: - key = key + '/tcp' + key += '/tcp' if isinstance(v, list): result[key] = [_convert_port_binding(binding) for binding in v] else: @@ -434,7 +434,7 @@ def parse_bytes(s): s = 0 else: if s[-2:-1].isalpha() and s[-1].isalpha(): - if (s[-1] == "b" or s[-1] == "B"): + if s[-1] == "b" or s[-1] == "B": s = s[:-1] units = BYTE_UNITS suffix = s[-1].lower() @@ -467,16 +467,32 @@ def parse_bytes(s): return s -def create_host_config( - binds=None, port_bindings=None, lxc_conf=None, publish_all_ports=False, - links=None, privileged=False, dns=None, dns_search=None, volumes_from=None, - network_mode=None, restart_policy=None, cap_add=None, cap_drop=None, - devices=None, extra_hosts=None, read_only=None, pid_mode=None, - ipc_mode=None, security_opt=None, ulimits=None, log_config=None, - mem_limit=None, memswap_limit=None, mem_swappiness=None, - cgroup_parent=None, group_add=None, cpu_quota=None, cpu_period=None, - oom_kill_disable=False, version=None -): +def host_config_type_error(param, param_value, expected): + error_msg = 'Invalid type for {0} param: expected {1} but found {2}' + return TypeError(error_msg.format(param, expected, type(param_value))) + + +def host_config_version_error(param, version, less_than=True): + operator = '<' if less_than else '>' + error_msg = '{0} param is not supported in API versions {1} {2}' + return errors.InvalidVersion(error_msg.format(param, operator, version)) + + +def host_config_value_error(param, param_value): + error_msg = 'Invalid value for {0} param: {1}' + return ValueError(error_msg.format(param, param_value)) + + +def create_host_config(binds=None, port_bindings=None, lxc_conf=None, + publish_all_ports=False, links=None, privileged=False, + dns=None, 
dns_search=None, volumes_from=None, + network_mode=None, restart_policy=None, cap_add=None, + cap_drop=None, devices=None, extra_hosts=None, + read_only=None, pid_mode=None, ipc_mode=None, + security_opt=None, ulimits=None, log_config=None, + mem_limit=None, memswap_limit=None, mem_swappiness=None, + cgroup_parent=None, group_add=None, cpu_quota=None, + cpu_period=None, oom_kill_disable=False, version=None): host_config = {} @@ -496,24 +512,21 @@ def create_host_config( if memswap_limit is not None: if isinstance(memswap_limit, six.string_types): memswap_limit = parse_bytes(memswap_limit) + host_config['MemorySwap'] = memswap_limit if mem_swappiness is not None: if version_lt(version, '1.20'): - raise errors.InvalidVersion( - 'mem_swappiness param not supported for API version < 1.20' - ) + raise host_config_version_error('mem_swappiness', '1.20') if not isinstance(mem_swappiness, int): - raise TypeError( - 'Invalid type for mem_swappiness param: expected int but' - ' found {0}'.format(type(mem_swappiness)) + raise host_config_type_error( + 'mem_swappiness', mem_swappiness, 'int' ) + host_config['MemorySwappiness'] = mem_swappiness if pid_mode not in (None, 'host'): - raise errors.DockerException( - 'Invalid value for pid param: {0}'.format(pid_mode) - ) + raise host_config_value_error('pid_mode', pid_mode) elif pid_mode: host_config['PidMode'] = pid_mode @@ -524,10 +537,9 @@ def create_host_config( host_config['Privileged'] = privileged if oom_kill_disable: - if version_lt(version, '1.19'): - raise errors.InvalidVersion( - 'oom_kill_disable param not supported for API version < 1.19' - ) + if version_lt(version, '1.20'): + raise host_config_version_error('oom_kill_disable', '1.19') + host_config['OomKillDisable'] = oom_kill_disable if publish_all_ports: @@ -545,6 +557,11 @@ def create_host_config( host_config['NetworkMode'] = 'default' if restart_policy: + if not isinstance(restart_policy, dict): + raise host_config_type_error( + 'restart_policy', restart_policy, 
'dict' + ) + host_config['RestartPolicy'] = restart_policy if cap_add: @@ -558,9 +575,8 @@ def create_host_config( if group_add: if version_lt(version, '1.20'): - raise errors.InvalidVersion( - 'group_add param not supported for API version < 1.20' - ) + raise host_config_version_error('group_add', '1.20') + host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add] if dns is not None: @@ -568,24 +584,21 @@ def create_host_config( if security_opt is not None: if not isinstance(security_opt, list): - raise errors.DockerException( - 'Invalid type for security_opt param: expected list but found' - ' {0}'.format(type(security_opt)) - ) + raise host_config_type_error('security_opt', security_opt, 'list') + host_config['SecurityOpt'] = security_opt if volumes_from is not None: if isinstance(volumes_from, six.string_types): volumes_from = volumes_from.split(',') + host_config['VolumesFrom'] = volumes_from if binds is not None: host_config['Binds'] = convert_volume_binds(binds) if port_bindings is not None: - host_config['PortBindings'] = convert_port_bindings( - port_bindings - ) + host_config['PortBindings'] = convert_port_bindings(port_bindings) if extra_hosts is not None: if isinstance(extra_hosts, dict): @@ -600,9 +613,7 @@ def create_host_config( if isinstance(links, dict): links = six.iteritems(links) - formatted_links = [ - '{0}:{1}'.format(k, v) for k, v in sorted(links) - ] + formatted_links = ['{0}:{1}'.format(k, v) for k, v in sorted(links)] host_config['Links'] = formatted_links @@ -620,10 +631,7 @@ def create_host_config( if ulimits is not None: if not isinstance(ulimits, list): - raise errors.DockerException( - 'Invalid type for ulimits param: expected list but found' - ' {0}'.format(type(ulimits)) - ) + raise host_config_type_error('ulimits', ulimits, 'list') host_config['Ulimits'] = [] for l in ulimits: if not isinstance(l, Ulimit): @@ -633,35 +641,27 @@ def create_host_config( if log_config is not None: if not isinstance(log_config, LogConfig): 
if not isinstance(log_config, dict): - raise errors.DockerException( - 'Invalid type for log_config param: expected LogConfig but' - ' found {0}'.format(type(log_config)) + raise host_config_type_error( + 'log_config', log_config, 'LogConfig' ) log_config = LogConfig(**log_config) + host_config['LogConfig'] = log_config if cpu_quota: if not isinstance(cpu_quota, int): - raise TypeError( - 'Invalid type for cpu_quota param: expected int but' - ' found {0}'.format(type(cpu_quota)) - ) + raise host_config_type_error('cpu_quota', cpu_quota, 'int') if version_lt(version, '1.19'): - raise errors.InvalidVersion( - 'cpu_quota param not supported for API version < 1.19' - ) + raise host_config_version_error('cpu_quota', '1.19') + host_config['CpuQuota'] = cpu_quota if cpu_period: if not isinstance(cpu_period, int): - raise TypeError( - 'Invalid type for cpu_period param: expected int but' - ' found {0}'.format(type(cpu_period)) - ) + raise host_config_type_error('cpu_period', cpu_period, 'int') if version_lt(version, '1.19'): - raise errors.InvalidVersion( - 'cpu_period param not supported for API version < 1.19' - ) + raise host_config_version_error('cpu_period', '1.19') + host_config['CpuPeriod'] = cpu_period return host_config
[enhancement] Add utility method to utils.py for raising errors PR incoming...
docker/docker-py
diff --git a/tests/integration/container_test.py b/tests/integration/container_test.py index 03965146..79840a1b 100644 --- a/tests/integration/container_test.py +++ b/tests/integration/container_test.py @@ -364,6 +364,13 @@ class CreateContainerTest(helpers.BaseTestCase): host_config = inspect['HostConfig'] self.assertIn('MemorySwappiness', host_config) + def test_create_host_config_exception_raising(self): + self.assertRaises(TypeError, + self.client.create_host_config, mem_swappiness='40') + + self.assertRaises(ValueError, + self.client.create_host_config, pid_mode='40') + class VolumeBindTest(helpers.BaseTestCase): def setUp(self): diff --git a/tests/unit/api_test.py b/tests/unit/api_test.py index 62d64e8a..23fd1913 100644 --- a/tests/unit/api_test.py +++ b/tests/unit/api_test.py @@ -314,8 +314,7 @@ class DockerApiTest(DockerClientTest): self.assertIn('SecurityOpt', result) self.assertEqual(result['SecurityOpt'], security_opt) self.assertRaises( - docker.errors.DockerException, self.client.create_host_config, - security_opt='wrong' + TypeError, self.client.create_host_config, security_opt='wrong' )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 1 }
1.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 -e git+https://github.com/docker/docker-py.git@9ebecb5991303d55fe208114a1de422650c4dcb2#egg=docker_py exceptiongroup==1.2.2 flake8==5.0.4 importlib-metadata==4.2.0 iniconfig==2.0.0 mccabe==0.7.0 packaging==24.0 pluggy==1.2.0 pycodestyle==2.9.1 pyflakes==2.5.0 pytest==7.4.4 pytest-cov==4.1.0 requests==2.5.3 six==1.17.0 tomli==2.0.1 typing_extensions==4.7.1 websocket-client==0.32.0 zipp==3.15.0
name: docker-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - exceptiongroup==1.2.2 - flake8==5.0.4 - importlib-metadata==4.2.0 - iniconfig==2.0.0 - mccabe==0.7.0 - packaging==24.0 - pluggy==1.2.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pytest==7.4.4 - pytest-cov==4.1.0 - requests==2.5.3 - six==1.17.0 - tomli==2.0.1 - typing-extensions==4.7.1 - websocket-client==0.32.0 - zipp==3.15.0 prefix: /opt/conda/envs/docker-py
[ "tests/unit/api_test.py::DockerApiTest::test_create_host_config_secopt" ]
[]
[ "tests/unit/api_test.py::DockerApiTest::test_auto_retrieve_server_version", "tests/unit/api_test.py::DockerApiTest::test_ctor", "tests/unit/api_test.py::DockerApiTest::test_events", "tests/unit/api_test.py::DockerApiTest::test_events_with_filters", "tests/unit/api_test.py::DockerApiTest::test_events_with_since_until", "tests/unit/api_test.py::DockerApiTest::test_info", "tests/unit/api_test.py::DockerApiTest::test_remove_link", "tests/unit/api_test.py::DockerApiTest::test_retrieve_server_version", "tests/unit/api_test.py::DockerApiTest::test_search", "tests/unit/api_test.py::DockerApiTest::test_url_compatibility_http", "tests/unit/api_test.py::DockerApiTest::test_url_compatibility_http_unix_triple_slash", "tests/unit/api_test.py::DockerApiTest::test_url_compatibility_tcp", "tests/unit/api_test.py::DockerApiTest::test_url_compatibility_unix", "tests/unit/api_test.py::DockerApiTest::test_url_compatibility_unix_triple_slash", "tests/unit/api_test.py::DockerApiTest::test_url_invalid_resource", "tests/unit/api_test.py::DockerApiTest::test_url_no_resource", "tests/unit/api_test.py::DockerApiTest::test_url_unversioned_api", "tests/unit/api_test.py::DockerApiTest::test_url_valid_resource", "tests/unit/api_test.py::DockerApiTest::test_version", "tests/unit/api_test.py::DockerApiTest::test_version_no_api_version", "tests/unit/api_test.py::StreamTest::test_early_stream_response" ]
[]
Apache License 2.0
300
sympy__sympy-10147
9c0a7cc5b334c41d1c0a63cc85cd20a1702c4def
2015-11-18 10:43:00
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/concrete/expr_with_limits.py b/sympy/concrete/expr_with_limits.py index 0a8293a7de..1a7012bd9c 100644 --- a/sympy/concrete/expr_with_limits.py +++ b/sympy/concrete/expr_with_limits.py @@ -410,7 +410,7 @@ def _eval_factor(self, **hints): summand = self.function.factor(**hints) if summand.is_Mul: out = sift(summand.args, lambda w: w.is_commutative \ - and not w.has(*self.variables)) + and not set(self.variables) & w.free_symbols) return Mul(*out[True])*self.func(Mul(*out[False]), \ *self.limits) else: diff --git a/sympy/polys/polytools.py b/sympy/polys/polytools.py index f9b8399e5e..95b9a2ec0c 100644 --- a/sympy/polys/polytools.py +++ b/sympy/polys/polytools.py @@ -5611,19 +5611,15 @@ def _symbolic_factor_list(expr, opt, method): """Helper function for :func:`_symbolic_factor`. """ coeff, factors = S.One, [] - args = list(Mul.make_args(expr)) + args = [i._eval_factor() if hasattr(i, '_eval_factor') else i for i in Mul.make_args(expr)] for arg in args: if arg.is_Number: coeff *= arg continue - elif hasattr(arg, '_eval_factor'): - for i in Mul.make_args(arg._eval_factor()): - if hasattr(i, '_eval_factor'): - factors.append( (i, 1) ) - else: - args.append(i) + if arg.is_Mul: + args.extend(arg.args) continue - elif arg.is_Pow: + if arg.is_Pow: base, exp = arg.args if base.is_Number: factors.append((base, exp))
do not use `has` to test against self.variables when factoring Sum ```python >>> c = Sum(x,(x,1,2)) >>> (c*Sum(x, (x, 1, 3))).doit() 18 >>> (Sum(c*x, (x, 1, 3))).doit() 18 >>> (Sum(c*x, (x, 1, 3))).factor() Sum(x*Sum(x, (x, 1, 2)), (x, 1, 3)) <--- should be Sum(x, (x, 1, 2))*Sum(x, (x, 1, 3)) ```
sympy/sympy
diff --git a/sympy/concrete/tests/test_sums_products.py b/sympy/concrete/tests/test_sums_products.py index 8019a6a6c6..47c27b3a1a 100644 --- a/sympy/concrete/tests/test_sums_products.py +++ b/sympy/concrete/tests/test_sums_products.py @@ -922,3 +922,10 @@ def test_convergent_failing(): # dirichlet tests assert Sum(sin(n)/n, (n, 1, oo)).is_convergent() is S.true assert Sum(sin(2*n)/n, (n, 1, oo)).is_convergent() is S.true + + +def test_issue_10156(): + cx = Sum(2*y**2*x, (x, 1,3)) + e = 2*y*Sum(2*cx*x**2, (x, 1, 9)) + assert e.factor() == \ + 8*y**3*Sum(x, (x, 1, 3))*Sum(x**2, (x, 1, 9)) diff --git a/sympy/polys/tests/test_polytools.py b/sympy/polys/tests/test_polytools.py index 6a700633bc..82ff417971 100644 --- a/sympy/polys/tests/test_polytools.py +++ b/sympy/polys/tests/test_polytools.py @@ -2403,7 +2403,6 @@ def test_factor(): assert (2*Sum(x**2, (x, 1, 9))).factor() == 2*Sum(x**2, (x, 1, 9)) - def test_factor_large(): f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567 g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@9c0a7cc5b334c41d1c0a63cc85cd20a1702c4def#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/concrete/tests/test_sums_products.py::test_issue_10156" ]
[]
[ "sympy/concrete/tests/test_sums_products.py::test_karr_convention", "sympy/concrete/tests/test_sums_products.py::test_karr_proposition_2a", "sympy/concrete/tests/test_sums_products.py::test_karr_proposition_2b", "sympy/concrete/tests/test_sums_products.py::test_arithmetic_sums", "sympy/concrete/tests/test_sums_products.py::test_polynomial_sums", "sympy/concrete/tests/test_sums_products.py::test_geometric_sums", "sympy/concrete/tests/test_sums_products.py::test_harmonic_sums", "sympy/concrete/tests/test_sums_products.py::test_composite_sums", "sympy/concrete/tests/test_sums_products.py::test_hypergeometric_sums", "sympy/concrete/tests/test_sums_products.py::test_other_sums", "sympy/concrete/tests/test_sums_products.py::test_evalf_fast_series", "sympy/concrete/tests/test_sums_products.py::test_evalf_fast_series_issue_4021", "sympy/concrete/tests/test_sums_products.py::test_evalf_slow_series", "sympy/concrete/tests/test_sums_products.py::test_euler_maclaurin", "sympy/concrete/tests/test_sums_products.py::test_evalf_euler_maclaurin", "sympy/concrete/tests/test_sums_products.py::test_evalf_symbolic", "sympy/concrete/tests/test_sums_products.py::test_evalf_issue_3273", "sympy/concrete/tests/test_sums_products.py::test_simple_products", "sympy/concrete/tests/test_sums_products.py::test_rational_products", "sympy/concrete/tests/test_sums_products.py::test_wallis_product", "sympy/concrete/tests/test_sums_products.py::test_telescopic_sums", "sympy/concrete/tests/test_sums_products.py::test_sum_reconstruct", "sympy/concrete/tests/test_sums_products.py::test_limit_subs", "sympy/concrete/tests/test_sums_products.py::test_function_subs", "sympy/concrete/tests/test_sums_products.py::test_equality", "sympy/concrete/tests/test_sums_products.py::test_Sum_doit", "sympy/concrete/tests/test_sums_products.py::test_Product_doit", "sympy/concrete/tests/test_sums_products.py::test_Sum_interface", "sympy/concrete/tests/test_sums_products.py::test_eval_diff", 
"sympy/concrete/tests/test_sums_products.py::test_hypersum", "sympy/concrete/tests/test_sums_products.py::test_issue_4170", "sympy/concrete/tests/test_sums_products.py::test_is_commutative", "sympy/concrete/tests/test_sums_products.py::test_is_zero", "sympy/concrete/tests/test_sums_products.py::test_is_number", "sympy/concrete/tests/test_sums_products.py::test_free_symbols", "sympy/concrete/tests/test_sums_products.py::test_conjugate_transpose", "sympy/concrete/tests/test_sums_products.py::test_issue_4171", "sympy/concrete/tests/test_sums_products.py::test_issue_6273", "sympy/concrete/tests/test_sums_products.py::test_issue_6274", "sympy/concrete/tests/test_sums_products.py::test_simplify", "sympy/concrete/tests/test_sums_products.py::test_change_index", "sympy/concrete/tests/test_sums_products.py::test_reorder", "sympy/concrete/tests/test_sums_products.py::test_reverse_order", "sympy/concrete/tests/test_sums_products.py::test_issue_7097", "sympy/concrete/tests/test_sums_products.py::test_factor_expand_subs", "sympy/concrete/tests/test_sums_products.py::test_distribution_over_equality", "sympy/concrete/tests/test_sums_products.py::test_issue_2787", "sympy/concrete/tests/test_sums_products.py::test_issue_4668", "sympy/concrete/tests/test_sums_products.py::test_matrix_sum", "sympy/concrete/tests/test_sums_products.py::test_indexed_idx_sum", "sympy/concrete/tests/test_sums_products.py::test_is_convergent", "sympy/concrete/tests/test_sums_products.py::test_is_absolute_convergent", "sympy/polys/tests/test_polytools.py::test_Poly_from_dict", "sympy/polys/tests/test_polytools.py::test_Poly_from_list", "sympy/polys/tests/test_polytools.py::test_Poly_from_poly", "sympy/polys/tests/test_polytools.py::test_Poly_from_expr", "sympy/polys/tests/test_polytools.py::test_Poly__new__", "sympy/polys/tests/test_polytools.py::test_Poly__args", "sympy/polys/tests/test_polytools.py::test_Poly__gens", "sympy/polys/tests/test_polytools.py::test_Poly_zero", 
"sympy/polys/tests/test_polytools.py::test_Poly_one", "sympy/polys/tests/test_polytools.py::test_Poly__unify", "sympy/polys/tests/test_polytools.py::test_Poly_free_symbols", "sympy/polys/tests/test_polytools.py::test_PurePoly_free_symbols", "sympy/polys/tests/test_polytools.py::test_Poly__eq__", "sympy/polys/tests/test_polytools.py::test_PurePoly__eq__", "sympy/polys/tests/test_polytools.py::test_PurePoly_Poly", "sympy/polys/tests/test_polytools.py::test_Poly_get_domain", "sympy/polys/tests/test_polytools.py::test_Poly_set_domain", "sympy/polys/tests/test_polytools.py::test_Poly_get_modulus", "sympy/polys/tests/test_polytools.py::test_Poly_set_modulus", "sympy/polys/tests/test_polytools.py::test_Poly_add_ground", "sympy/polys/tests/test_polytools.py::test_Poly_sub_ground", "sympy/polys/tests/test_polytools.py::test_Poly_mul_ground", "sympy/polys/tests/test_polytools.py::test_Poly_quo_ground", "sympy/polys/tests/test_polytools.py::test_Poly_exquo_ground", "sympy/polys/tests/test_polytools.py::test_Poly_abs", "sympy/polys/tests/test_polytools.py::test_Poly_neg", "sympy/polys/tests/test_polytools.py::test_Poly_add", "sympy/polys/tests/test_polytools.py::test_Poly_sub", "sympy/polys/tests/test_polytools.py::test_Poly_mul", "sympy/polys/tests/test_polytools.py::test_Poly_sqr", "sympy/polys/tests/test_polytools.py::test_Poly_pow", "sympy/polys/tests/test_polytools.py::test_Poly_divmod", "sympy/polys/tests/test_polytools.py::test_Poly_eq_ne", "sympy/polys/tests/test_polytools.py::test_Poly_nonzero", "sympy/polys/tests/test_polytools.py::test_Poly_properties", "sympy/polys/tests/test_polytools.py::test_Poly_is_irreducible", "sympy/polys/tests/test_polytools.py::test_Poly_subs", "sympy/polys/tests/test_polytools.py::test_Poly_replace", "sympy/polys/tests/test_polytools.py::test_Poly_reorder", "sympy/polys/tests/test_polytools.py::test_Poly_ltrim", "sympy/polys/tests/test_polytools.py::test_Poly_has_only_gens", "sympy/polys/tests/test_polytools.py::test_Poly_to_ring", 
"sympy/polys/tests/test_polytools.py::test_Poly_to_field", "sympy/polys/tests/test_polytools.py::test_Poly_to_exact", "sympy/polys/tests/test_polytools.py::test_Poly_retract", "sympy/polys/tests/test_polytools.py::test_Poly_slice", "sympy/polys/tests/test_polytools.py::test_Poly_coeffs", "sympy/polys/tests/test_polytools.py::test_Poly_monoms", "sympy/polys/tests/test_polytools.py::test_Poly_terms", "sympy/polys/tests/test_polytools.py::test_Poly_all_coeffs", "sympy/polys/tests/test_polytools.py::test_Poly_all_monoms", "sympy/polys/tests/test_polytools.py::test_Poly_all_terms", "sympy/polys/tests/test_polytools.py::test_Poly_termwise", "sympy/polys/tests/test_polytools.py::test_Poly_length", "sympy/polys/tests/test_polytools.py::test_Poly_as_dict", "sympy/polys/tests/test_polytools.py::test_Poly_as_expr", "sympy/polys/tests/test_polytools.py::test_Poly_lift", "sympy/polys/tests/test_polytools.py::test_Poly_deflate", "sympy/polys/tests/test_polytools.py::test_Poly_inject", "sympy/polys/tests/test_polytools.py::test_Poly_eject", "sympy/polys/tests/test_polytools.py::test_Poly_exclude", "sympy/polys/tests/test_polytools.py::test_Poly__gen_to_level", "sympy/polys/tests/test_polytools.py::test_Poly_degree", "sympy/polys/tests/test_polytools.py::test_Poly_degree_list", "sympy/polys/tests/test_polytools.py::test_Poly_total_degree", "sympy/polys/tests/test_polytools.py::test_Poly_homogenize", "sympy/polys/tests/test_polytools.py::test_Poly_homogeneous_order", "sympy/polys/tests/test_polytools.py::test_Poly_LC", "sympy/polys/tests/test_polytools.py::test_Poly_TC", "sympy/polys/tests/test_polytools.py::test_Poly_EC", "sympy/polys/tests/test_polytools.py::test_Poly_coeff", "sympy/polys/tests/test_polytools.py::test_Poly_nth", "sympy/polys/tests/test_polytools.py::test_Poly_LM", "sympy/polys/tests/test_polytools.py::test_Poly_LM_custom_order", "sympy/polys/tests/test_polytools.py::test_Poly_EM", "sympy/polys/tests/test_polytools.py::test_Poly_LT", 
"sympy/polys/tests/test_polytools.py::test_Poly_ET", "sympy/polys/tests/test_polytools.py::test_Poly_max_norm", "sympy/polys/tests/test_polytools.py::test_Poly_l1_norm", "sympy/polys/tests/test_polytools.py::test_Poly_clear_denoms", "sympy/polys/tests/test_polytools.py::test_Poly_rat_clear_denoms", "sympy/polys/tests/test_polytools.py::test_Poly_integrate", "sympy/polys/tests/test_polytools.py::test_Poly_diff", "sympy/polys/tests/test_polytools.py::test_issue_9585", "sympy/polys/tests/test_polytools.py::test_Poly_eval", "sympy/polys/tests/test_polytools.py::test_Poly___call__", "sympy/polys/tests/test_polytools.py::test_parallel_poly_from_expr", "sympy/polys/tests/test_polytools.py::test_pdiv", "sympy/polys/tests/test_polytools.py::test_div", "sympy/polys/tests/test_polytools.py::test_gcdex", "sympy/polys/tests/test_polytools.py::test_revert", "sympy/polys/tests/test_polytools.py::test_subresultants", "sympy/polys/tests/test_polytools.py::test_resultant", "sympy/polys/tests/test_polytools.py::test_discriminant", "sympy/polys/tests/test_polytools.py::test_dispersion", "sympy/polys/tests/test_polytools.py::test_gcd_list", "sympy/polys/tests/test_polytools.py::test_lcm_list", "sympy/polys/tests/test_polytools.py::test_gcd", "sympy/polys/tests/test_polytools.py::test_gcd_numbers_vs_polys", "sympy/polys/tests/test_polytools.py::test_terms_gcd", "sympy/polys/tests/test_polytools.py::test_trunc", "sympy/polys/tests/test_polytools.py::test_monic", "sympy/polys/tests/test_polytools.py::test_content", "sympy/polys/tests/test_polytools.py::test_primitive", "sympy/polys/tests/test_polytools.py::test_compose", "sympy/polys/tests/test_polytools.py::test_shift", "sympy/polys/tests/test_polytools.py::test_sturm", "sympy/polys/tests/test_polytools.py::test_gff", "sympy/polys/tests/test_polytools.py::test_sqf_norm", "sympy/polys/tests/test_polytools.py::test_sqf", "sympy/polys/tests/test_polytools.py::test_factor", "sympy/polys/tests/test_polytools.py::test_factor_large", 
"sympy/polys/tests/test_polytools.py::test_intervals", "sympy/polys/tests/test_polytools.py::test_refine_root", "sympy/polys/tests/test_polytools.py::test_count_roots", "sympy/polys/tests/test_polytools.py::test_Poly_root", "sympy/polys/tests/test_polytools.py::test_real_roots", "sympy/polys/tests/test_polytools.py::test_all_roots", "sympy/polys/tests/test_polytools.py::test_nroots", "sympy/polys/tests/test_polytools.py::test_ground_roots", "sympy/polys/tests/test_polytools.py::test_nth_power_roots_poly", "sympy/polys/tests/test_polytools.py::test_torational_factor_list", "sympy/polys/tests/test_polytools.py::test_cancel", "sympy/polys/tests/test_polytools.py::test_reduced", "sympy/polys/tests/test_polytools.py::test_groebner", "sympy/polys/tests/test_polytools.py::test_fglm", "sympy/polys/tests/test_polytools.py::test_is_zero_dimensional", "sympy/polys/tests/test_polytools.py::test_GroebnerBasis", "sympy/polys/tests/test_polytools.py::test_poly", "sympy/polys/tests/test_polytools.py::test_keep_coeff", "sympy/polys/tests/test_polytools.py::test_noncommutative", "sympy/polys/tests/test_polytools.py::test_to_rational_coeffs" ]
[]
BSD
301
rackerlabs__lambda-uploader-29
abf7e64f20294e7f6f44169ccb496f61018667b7
2015-11-18 14:32:02
c40923a6982a0a3d4fd41b135a4f9b7e97b74f90
diff --git a/README.md b/README.md index 13e2baa..b3654f3 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,11 @@ To specify an alternative profile that has been defined in `~/.aws/credentials` lambda-uploader --profile=alternative-profile ``` +To specify an alternative, prexisting virtualenv use the `--virtualenv` parameter. +```shell +lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv +``` + If you would prefer to upload another way you can tell the uploader to ignore the upload. This will create a package and leave it in the project directory. ```shell diff --git a/README.rst b/README.rst index ad7b199..db1a8b3 100644 --- a/README.rst +++ b/README.rst @@ -63,6 +63,12 @@ To specify an alternative profile that has been defined in lambda-uploader --profile=alternative-profile +To specify an alternative, prexisting virtualenv use the ``--virtualenv`` parameter. + +.. code:: shell + + lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv + If you would prefer to upload another way you can tell the uploader to ignore the upload. This will create a package and leave it in the project directory. 
diff --git a/lambda_uploader/package.py b/lambda_uploader/package.py index 083077f..8bbbf54 100644 --- a/lambda_uploader/package.py +++ b/lambda_uploader/package.py @@ -26,28 +26,38 @@ TEMP_WORKSPACE_NAME = ".lamba_uploader_temp" ZIPFILE_NAME = 'lambda_function.zip' -def build_package(path, requirements): - pkg = Package(path) +def build_package(path, requirements, virtualenv=None): + pkg = Package(path, virtualenv) pkg.clean_workspace() pkg.clean_zipfile() pkg.prepare_workspace() - pkg.install_requirements(requirements) + if virtualenv: + if not os.path.isdir(virtualenv): + raise Exception("supplied virtualenv %s not found" % virtualenv) + LOG.info("Using existing virtualenv found in %s" % virtualenv) + else: + LOG.info('Building new virtualenv and installing requirements') + pkg.prepare_virtualenv() + pkg.install_requirements(requirements) pkg.package() return pkg class Package(object): - def __init__(self, path): + def __init__(self, path, virtualenv=None): self._path = path self._temp_workspace = os.path.join(path, TEMP_WORKSPACE_NAME) self.zip_file = os.path.join(path, ZIPFILE_NAME) - self._pkg_venv = os.path.join(self._temp_workspace, 'venv') - self._venv_pip = 'bin/pip' - if sys.platform == 'win32' or sys.platform == 'cygwin': - self._venv_pip = 'Scripts\pip.exe' + if virtualenv: + self._pkg_venv = virtualenv + else: + self._pkg_venv = os.path.join(self._temp_workspace, 'venv') + self._venv_pip = 'bin/pip' + if sys.platform == 'win32' or sys.platform == 'cygwin': + self._venv_pip = 'Scripts\pip.exe' def clean_workspace(self): if os.path.isdir(self._temp_workspace): @@ -61,6 +71,7 @@ class Package(object): # Setup temporary workspace os.mkdir(self._temp_workspace) + def prepare_virtualenv(self): proc = Popen(["virtualenv", self._pkg_venv], stdout=PIPE, stderr=PIPE) stdout, stderr = proc.communicate() LOG.debug("Virtualenv stdout: %s" % stdout) diff --git a/lambda_uploader/shell.py b/lambda_uploader/shell.py index 027685f..5b57967 100644 --- 
a/lambda_uploader/shell.py +++ b/lambda_uploader/shell.py @@ -50,7 +50,7 @@ def _execute(args): cfg = config.Config(pth, args.config, role=args.role) _print('Building Package') - pkg = package.build_package(pth, cfg.requirements) + pkg = package.build_package(pth, cfg.requirements, args.virtualenv) if not args.no_clean: pkg.clean_workspace() @@ -97,6 +97,9 @@ def main(arv=None): action='store_const', help='publish an upload to an immutable version', const=True) + parser.add_argument('--virtualenv', '-e', + help='use specified virtualenv instead of making one', + default=None) parser.add_argument('--role', dest='role', default=getenv('LAMBDA_UPLOADER_ROLE'), help=('IAM role to assign the lambda function, '
Allow a virtualenv for another platform to be included Instead of building a virtualenv with requirements.txt, allow an existing virtualenv to be used. This would allow us to build a virtualenv for Amazon Linux, and then still deploy lambda functions using lambda-uploader from client machines or other automation that may not be the same architecture as what Lambda runs on. This is needed for me due to PyPi's cryptography package using different shared libraries on Ubuntu (my desktop) and Amazon Linux (where the function will run).
rackerlabs/lambda-uploader
diff --git a/test/test_package.py b/test/test_package.py index 88cc9f6..1a64572 100644 --- a/test/test_package.py +++ b/test/test_package.py @@ -39,6 +39,7 @@ def test_prepare_workspace(): pkg = package.Package(TESTING_TEMP_DIR) pkg.prepare_workspace() + pkg.prepare_virtualenv() assert path.isdir(temp_workspace) assert path.isdir(path.join(temp_workspace, 'venv')) if sys.platform == 'win32' or sys.platform == 'cygwin': @@ -63,6 +64,11 @@ def test_install_requirements(): assert path.isdir(path.join(site_packages, '_pytest')) +def test_existing_virtualenv(): + pkg = package.Package(TESTING_TEMP_DIR, 'abc') + assert pkg._pkg_venv == 'abc' + + def test_package(): pkg = package.Package(TESTING_TEMP_DIR) pkg.package()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 4 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "tests/pksetup_data/pksetupunit1/requirements.txt", "tests/pksetup_data/pksetupunit2/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
boto3==1.1.4 botocore==1.2.11 certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 distlib==0.3.9 docutils==0.20.1 exceptiongroup==1.2.2 execnet==2.0.2 filelock==3.12.2 futures==2.2.0 importlib-metadata==6.7.0 iniconfig==2.0.0 jmespath==0.10.0 -e git+https://github.com/rackerlabs/lambda-uploader.git@abf7e64f20294e7f6f44169ccb496f61018667b7#egg=lambda_uploader packaging==24.0 platformdirs==4.0.0 pluggy==1.2.0 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.0.1 typing_extensions==4.7.1 virtualenv==20.26.6 zipp==3.15.0
name: lambda-uploader channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.1.4 - botocore==1.2.11 - coverage==7.2.7 - distlib==0.3.9 - docutils==0.20.1 - exceptiongroup==1.2.2 - execnet==2.0.2 - filelock==3.12.2 - futures==2.2.0 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - jmespath==0.10.0 - packaging==24.0 - platformdirs==4.0.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.0.1 - typing-extensions==4.7.1 - virtualenv==20.26.6 - zipp==3.15.0 prefix: /opt/conda/envs/lambda-uploader
[ "test/test_package.py::test_prepare_workspace", "test/test_package.py::test_existing_virtualenv" ]
[ "test/test_package.py::test_install_requirements" ]
[ "test/test_package.py::test_package_zip_location", "test/test_package.py::test_package_clean_workspace", "test/test_package.py::test_package" ]
[]
Apache License 2.0
302
joke2k__django-environ-57
0033650843d1be51b4e05dc3ccb58bbfee44e42c
2015-11-19 12:21:44
c2620021614557abe197578f99deeef42af3e082
diff --git a/environ/environ.py b/environ/environ.py index ca07f93..601523d 100644 --- a/environ/environ.py +++ b/environ/environ.py @@ -330,7 +330,7 @@ class Env(object): >>> from environ import Env >>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite') - {'ENGINE': 'django.db.backends.sqlite3', 'HOST': None, 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': None, 'PORT': None, 'USER': None} + {'ENGINE': 'django.db.backends.sqlite3', 'HOST': '', 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': '', 'PORT': '', 'USER': ''} >>> Env.db_url_config('postgres://uf07k1i6d8ia0v:[email protected]:5431/d8r82722r2kuvn') {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'} @@ -364,11 +364,11 @@ class Env(object): # Update with environment configuration. config.update({ - 'NAME': path, - 'USER': url.username, - 'PASSWORD': url.password, - 'HOST': url.hostname, - 'PORT': _cast_int(url.port), + 'NAME': path or '', + 'USER': url.username or '', + 'PASSWORD': url.password or '', + 'HOST': url.hostname or '', + 'PORT': _cast_int(url.port) or '', }) if url.query:
Unspecified database properties default to `None` rather than the empty string Using django-environ v0.4.0 on Python v2.7.10. **STR:** ```bash $ DATABASE_URL='mysql://root@localhost/test_db' $ python >>> import json >>> import environ >>> env = environ.Env() >>> print(json.dumps(env.db_url('DATABASE_URL'), indent=2)) ``` (Travis' MySQL setup has no password, hence the `DATABASE_URL` value above.) **Expected:** ```json { "ENGINE": "django.db.backends.mysql", "NAME": "test_db", "HOST": "localhost", "USER": "root", "PASSWORD": "", "PORT": "" } ``` ...or else: ```json { "ENGINE": "django.db.backends.mysql", "NAME": "test_db", "HOST": "localhost", "USER": "root" } ``` Django has defaults for those properties - which are the empty string: [docs](https://docs.djangoproject.com/en/1.8/ref/settings/#password) **Actual:** ```json { "ENGINE": "django.db.backends.mysql", "NAME": "test_db", "HOST": "localhost", "USER": "root", "PASSWORD": null, "PORT": null } ```
joke2k/django-environ
diff --git a/environ/test.py b/environ/test.py index 7f66c34..6761f00 100644 --- a/environ/test.py +++ b/environ/test.py @@ -157,7 +157,7 @@ class EnvTests(BaseTests): self.assertEqual(mysql_config['HOST'], 'us-cdbr-east.cleardb.com') self.assertEqual(mysql_config['USER'], 'bea6eb0') self.assertEqual(mysql_config['PASSWORD'], '69772142') - self.assertEqual(mysql_config['PORT'], None) + self.assertEqual(mysql_config['PORT'], '') mysql_gis_config = self.env.db('DATABASE_MYSQL_GIS_URL') self.assertEqual(mysql_gis_config['ENGINE'], 'django.contrib.gis.db.backends.mysql') @@ -165,7 +165,7 @@ class EnvTests(BaseTests): self.assertEqual(mysql_gis_config['HOST'], '127.0.0.1') self.assertEqual(mysql_gis_config['USER'], 'user') self.assertEqual(mysql_gis_config['PASSWORD'], 'password') - self.assertEqual(mysql_gis_config['PORT'], None) + self.assertEqual(mysql_gis_config['PORT'], '') sqlite_config = self.env.db('DATABASE_SQLITE_URL') self.assertEqual(sqlite_config['ENGINE'], 'django.db.backends.sqlite3') @@ -276,7 +276,18 @@ class DatabaseTestSuite(unittest.TestCase): self.assertEqual(url['HOST'], 'us-cdbr-east.cleardb.com') self.assertEqual(url['USER'], 'bea6eb025ca0d8') self.assertEqual(url['PASSWORD'], '69772142') - self.assertEqual(url['PORT'], None) + self.assertEqual(url['PORT'], '') + + def test_mysql_no_password(self): + url = 'mysql://travis@localhost/test_db' + url = Env.db_url_config(url) + + self.assertEqual(url['ENGINE'], 'django.db.backends.mysql') + self.assertEqual(url['NAME'], 'test_db') + self.assertEqual(url['HOST'], 'localhost') + self.assertEqual(url['USER'], 'travis') + self.assertEqual(url['PASSWORD'], '') + self.assertEqual(url['PORT'], '') def test_empty_sqlite_url(self): url = 'sqlite://' @@ -309,7 +320,7 @@ class DatabaseTestSuite(unittest.TestCase): self.assertEqual(url['ENGINE'], 'ldapdb.backends.ldap') self.assertEqual(url['HOST'], 'ldap.nodomain.org') - self.assertEqual(url['PORT'], None) + self.assertEqual(url['PORT'], '') 
self.assertEqual(url['NAME'], 'ldap://ldap.nodomain.org') self.assertEqual(url['USER'], 'cn=admin,dc=nodomain,dc=org') self.assertEqual(url['PASSWORD'], 'some_secret_password')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
asgiref==3.4.1 attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 Django==3.2.25 -e git+https://github.com/joke2k/django-environ.git@0033650843d1be51b4e05dc3ccb58bbfee44e42c#egg=django_environ importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytz==2025.2 six==1.17.0 sqlparse==0.4.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: django-environ channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - asgiref==3.4.1 - django==3.2.25 - pytz==2025.2 - six==1.17.0 - sqlparse==0.4.4 prefix: /opt/conda/envs/django-environ
[ "environ/test.py::EnvTests::test_db_url_value", "environ/test.py::FileEnvTests::test_db_url_value", "environ/test.py::DatabaseTestSuite::test_cleardb_parsing", "environ/test.py::DatabaseTestSuite::test_database_ldap_url", "environ/test.py::DatabaseTestSuite::test_mysql_no_password" ]
[]
[ "environ/test.py::EnvTests::test_bool_false", "environ/test.py::EnvTests::test_bool_true", "environ/test.py::EnvTests::test_cache_url_value", "environ/test.py::EnvTests::test_dict_parsing", "environ/test.py::EnvTests::test_dict_value", "environ/test.py::EnvTests::test_email_url_value", "environ/test.py::EnvTests::test_empty_list", "environ/test.py::EnvTests::test_float", "environ/test.py::EnvTests::test_int", "environ/test.py::EnvTests::test_int_list", "environ/test.py::EnvTests::test_int_tuple", "environ/test.py::EnvTests::test_int_with_none_default", "environ/test.py::EnvTests::test_json_value", "environ/test.py::EnvTests::test_not_present_with_default", "environ/test.py::EnvTests::test_not_present_without_default", "environ/test.py::EnvTests::test_path", "environ/test.py::EnvTests::test_proxied_value", "environ/test.py::EnvTests::test_str", "environ/test.py::EnvTests::test_str_list_with_spaces", "environ/test.py::EnvTests::test_url_value", "environ/test.py::FileEnvTests::test_bool_false", "environ/test.py::FileEnvTests::test_bool_true", "environ/test.py::FileEnvTests::test_cache_url_value", "environ/test.py::FileEnvTests::test_dict_parsing", "environ/test.py::FileEnvTests::test_dict_value", "environ/test.py::FileEnvTests::test_email_url_value", "environ/test.py::FileEnvTests::test_empty_list", "environ/test.py::FileEnvTests::test_float", "environ/test.py::FileEnvTests::test_int", "environ/test.py::FileEnvTests::test_int_list", "environ/test.py::FileEnvTests::test_int_tuple", "environ/test.py::FileEnvTests::test_int_with_none_default", "environ/test.py::FileEnvTests::test_json_value", "environ/test.py::FileEnvTests::test_not_present_with_default", "environ/test.py::FileEnvTests::test_not_present_without_default", "environ/test.py::FileEnvTests::test_path", "environ/test.py::FileEnvTests::test_proxied_value", "environ/test.py::FileEnvTests::test_str", "environ/test.py::FileEnvTests::test_str_list_with_spaces", "environ/test.py::FileEnvTests::test_url_value", 
"environ/test.py::SchemaEnvTests::test_schema", "environ/test.py::DatabaseTestSuite::test_database_options_parsing", "environ/test.py::DatabaseTestSuite::test_empty_sqlite_url", "environ/test.py::DatabaseTestSuite::test_memory_sqlite_url", "environ/test.py::DatabaseTestSuite::test_mysql_gis_parsing", "environ/test.py::DatabaseTestSuite::test_postgis_parsing", "environ/test.py::DatabaseTestSuite::test_postgres_parsing", "environ/test.py::CacheTestSuite::test_custom_backend", "environ/test.py::CacheTestSuite::test_dbcache_parsing", "environ/test.py::CacheTestSuite::test_dummycache_parsing", "environ/test.py::CacheTestSuite::test_filecache_parsing", "environ/test.py::CacheTestSuite::test_filecache_windows_parsing", "environ/test.py::CacheTestSuite::test_locmem_named_parsing", "environ/test.py::CacheTestSuite::test_locmem_parsing", "environ/test.py::CacheTestSuite::test_memcache_multiple_parsing", "environ/test.py::CacheTestSuite::test_memcache_parsing", "environ/test.py::CacheTestSuite::test_memcache_pylib_parsing", "environ/test.py::CacheTestSuite::test_memcache_socket_parsing", "environ/test.py::CacheTestSuite::test_options_parsing", "environ/test.py::CacheTestSuite::test_redis_parsing", "environ/test.py::CacheTestSuite::test_redis_socket_parsing", "environ/test.py::SearchTestSuite::test_common_args_parsing", "environ/test.py::SearchTestSuite::test_elasticsearch_parsing", "environ/test.py::SearchTestSuite::test_simple_parsing", "environ/test.py::SearchTestSuite::test_solr_multicore_parsing", "environ/test.py::SearchTestSuite::test_solr_parsing", "environ/test.py::SearchTestSuite::test_whoosh_parsing", "environ/test.py::SearchTestSuite::test_xapian_parsing", "environ/test.py::EmailTests::test_smtp_parsing", "environ/test.py::PathTests::test_comparison", "environ/test.py::PathTests::test_path_class", "environ/test.py::PathTests::test_required_path" ]
[]
MIT License
303
ARMmbed__yotta-586
852c1e498fbb12938fa28aa388cf2b2b650508fe
2015-11-19 17:32:49
852c1e498fbb12938fa28aa388cf2b2b650508fe
diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..0150ea4 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,5 @@ +[run] +parallel=True +concurrency=multiprocessing +include=./yotta/* + diff --git a/tox.ini b/tox.ini index 20af1fb..06075d3 100644 --- a/tox.ini +++ b/tox.ini @@ -6,14 +6,16 @@ deps= cython pylint coverage -setenv= - COVERAGE_PROCESS_START = {toxinidir}/.coveragerc +passenv= + SSH_AUTH_SOCK commands= pip install . - coverage erase - coverage run --parallel-mode setup.py test - coverage combine - coverage report --include="yotta/*" + python setup.py test + # disable coverage for now: subprocesses aren't being combined correctly + # coverage erase + # coverage run --parallel-mode setup.py test + # coverage combine + # coverage report --include="yotta/*" py27: pylint ./yotta py33: pylint ./yotta py34: pylint ./yotta diff --git a/yotta/lib/validate.py b/yotta/lib/validate.py index 1ff5b8e..ed38de8 100644 --- a/yotta/lib/validate.py +++ b/yotta/lib/validate.py @@ -18,10 +18,12 @@ import pack Source_Dir_Regex = re.compile('^[a-z0-9_-]*$') Source_Dir_Invalid_Regex = re.compile('[^a-z0-9_-]*') -Component_Name_Regex = re.compile('^[a-z0-9-]*$') Component_Name_Replace_With_Dash = re.compile('[^a-z0-9]+') Looks_Like_An_Email = re.compile('^[^@]+@[^@]+\.[^@]+$') +Component_Name_Regex = r'^[a-z]+[a-z0-9-]*$' +Target_Name_Regex = r'^[a-z]+[a-z0-9+-]*$' + # return an error string describing the validation failure, or None if there is # no error def sourceDirValidationError(dirname, component_name): @@ -41,10 +43,15 @@ def sourceDirValidationError(dirname, component_name): return None def componentNameValidationError(component_name): - if not Component_Name_Regex.match(component_name): + if not re.match(Component_Name_Regex, component_name): return 'Module name "%s" is invalid - must contain only lowercase a-z, 0-9 and hyphen, with no spaces.' 
% component_name return None +def targetNameValidationError(target_name): + if not re.match(Target_Name_Regex, target_name): + return 'Module name "%s" is invalid - must contain only lowercase a-z, 0-9 and hyphen, with no spaces.' % target_name + return None + def componentNameCoerced(component_name): return Component_Name_Replace_With_Dash.sub('-', component_name.lower()) @@ -67,6 +74,18 @@ def currentDirectoryModule(): return None return c +def currentDirectoryTarget(): + try: + t = target.Target(os.getcwd()) + except pack.InvalidDescription as e: + logging.error(e) + return None + if not t: + logging.error(str(t.error)) + logging.error('The current directory does not contain a valid target.') + return None + return t + def currentDirectoryModuleOrTarget(): wd = os.getcwd() errors = [] diff --git a/yotta/main.py b/yotta/main.py index f1d6055..7147d01 100644 --- a/yotta/main.py +++ b/yotta/main.py @@ -25,7 +25,7 @@ from .lib import detect import yotta.lib.globalconf as globalconf # hook to support coverage information when yotta runs itself during tests: -if 'COVERAGE_PROCESS_START' is os.environ: +if 'COVERAGE_PROCESS_START' in os.environ: import coverage coverage.process_startup() @@ -92,7 +92,7 @@ def main(): description='Build software using re-usable components.\n'+ 'For more detailed help on each subcommand, run: yotta <subcommand> --help' ) - subparser = parser.add_subparsers(metavar='<subcommand>') + subparser = parser.add_subparsers(dest='subcommand_name', metavar='<subcommand>') parser.add_argument('--version', nargs=0, action=FastVersionAction, help='display the version' @@ -145,7 +145,8 @@ def main(): 'Search for open-source modules and targets that have been published '+ 'to the yotta registry (with yotta publish). See help for `yotta '+ 'install` for installing modules, and for `yotta target` for '+ - 'switching targets.' 
+ 'switching targets.', + 'Search for published modules and targets' ) addParser('init', 'init', 'Create a new module.') addParser('install', 'install', @@ -164,8 +165,20 @@ def main(): 'Build the current module.' ) addParser('version', 'version', 'Bump the module version, or (with no arguments) display the current version.') - addParser('link', 'link', 'Symlink a module.') - addParser('link-target', 'link_target', 'Symlink a target.') + addParser('link', 'link', + 'Symlink a module to be used in another module. Use "yotta link" '+ + '(with no arguments) to link the current module globally. Or use '+ + '"yotta link module-name" To use a module that was previously linked '+ + 'globally in the current module.', + 'Symlink a module' + ) + addParser('link-target', 'link_target', + 'Symlink a target to be used in another module. Use "yotta link-target" '+ + '(with no arguments) to link the current target globally. Or use '+ + '"yotta link-target target-name" To use a target that was previously linked '+ + 'globally in the current module.', + 'Symlink a target' + ) addParser('update', 'update', 'Update dependencies for the current module, or a specific module.') addParser('target', 'target', 'Set or display the target device.') addParser('debug', 'debug', 'Attach a debugger to the current target. 
Requires target support.') @@ -186,7 +199,12 @@ def main(): addParser('list', 'list', 'List the dependencies of the current module, or the inherited targets of the current target.') addParser('outdated', 'outdated', 'Display information about dependencies which have newer versions available.') addParser('uninstall', 'uninstall', 'Remove a specific dependency of the current module, both from module.json and from disk.') - addParser('remove', 'remove', 'Remove the downloaded version of a dependency, or un-link a linked module.') + addParser('remove', 'remove', + 'Remove the downloaded version of a dependency module or target, or '+ + 'un-link a linked module or target (see yotta link --help for details '+ + 'of linking). This command does not modify your module.json file.', + 'Remove or unlink a dependency without removing it from module.json.' + ) addParser('owners', 'owners', 'Add/remove/display the owners of a module or target.') addParser('licenses', 'licenses', 'List the licenses of the current module and its dependencies.') addParser('clean', 'clean', 'Remove files created by yotta and the build.') @@ -195,16 +213,17 @@ def main(): # short synonyms, subparser.choices is a dictionary, so use update() to # merge in the keys from another dictionary short_commands = { - 'up':subparser.choices['update'], - 'in':subparser.choices['install'], - 'ln':subparser.choices['link'], - 'v':subparser.choices['version'], - 'ls':subparser.choices['list'], - 'rm':subparser.choices['remove'], - 'unlink':subparser.choices['remove'], - 'owner':subparser.choices['owners'], - 'lics':subparser.choices['licenses'], - 'who':subparser.choices['whoami'] + 'up':subparser.choices['update'], + 'in':subparser.choices['install'], + 'ln':subparser.choices['link'], + 'v':subparser.choices['version'], + 'ls':subparser.choices['list'], + 'rm':subparser.choices['remove'], + 'unlink':subparser.choices['remove'], + 'unlink-target':subparser.choices['remove'], + 'owner':subparser.choices['owners'], + 
'lics':subparser.choices['licenses'], + 'who':subparser.choices['whoami'] } subparser.choices.update(short_commands) diff --git a/yotta/remove.py b/yotta/remove.py index e734003..c961616 100644 --- a/yotta/remove.py +++ b/yotta/remove.py @@ -14,22 +14,58 @@ from .lib import validate def addOptions(parser): - parser.add_argument('component', - help='Name of the dependency to remove' + parser.add_argument('module', default=None, nargs='?', metavar='<module>', + help='Name of the module to remove. If omitted the current module '+ + 'or target will be removed from the global linking directory.' ) def execCommand(args, following_args): - err = validate.componentNameValidationError(args.component) - if err: - logging.error(err) - return 1 - c = validate.currentDirectoryModule() - if not c: + module_or_target = 'module' + if 'target' in args.subcommand_name: + module_or_target = 'target' + if args.module is not None: + return removeDependency(args, module_or_target) + else: + return removeGlobally(module_or_target) + +def rmLinkOrDirectory(path, nonexistent_warning): + if not os.path.exists(path): + logging.warning(nonexistent_warning) return 1 - path = os.path.join(c.modulesPath(), args.component) if fsutils.isLink(path): fsutils.rmF(path) else: fsutils.rmRf(path) + return 0 + +def removeGlobally(module_or_target): + # folders, , get places to install things, internal + from .lib import folders + if module_or_target == 'module': + global_dir = folders.globalInstallDirectory() + p = validate.currentDirectoryModule() + else: + global_dir = folders.globalTargetInstallDirectory() + p = validate.currentDirectoryTarget() + if p is None: + return 1 + path = os.path.join(global_dir, p.getName()) + return rmLinkOrDirectory(path, ('%s is not linked globally' % p.getName())) + +def removeDependency(args, module_or_target): + c = validate.currentDirectoryModule() + if not c: + return 1 + if module_or_target == 'module': + subdir = c.modulesPath() + err = 
validate.componentNameValidationError(args.module) + else: + subdir = c.targetsPath() + err = validate.targetNameValidationError(args.module) + if err: + logging.error(err) + return 1 + path = os.path.join(subdir, args.module) + return rmLinkOrDirectory(path, '%s %s not found' % (('dependency', 'target')[module_or_target=='target'], args.module))
c:\ytexe yt link does not install module ## Problem To add a local library module to a executable module it requires 3 steps `c:\ytlib yt link` `c:\ytexe yt link ytlib` If you run `yt ls` at this point there is no indication that `ytlib` is linked into the project, you must either `yt install` or `yt build` to get `ytlib` added to your module.json file `c:\ytexe yt install ytlib` At this point you can now see `ytlib` in a `yt ls` command output. ### example I'm a developer developing locally. I want to add simplelog to my project to help me debug and maybe tweak a few pretty print settings in simplelog for fun. So I make the yotta executable `ytexe`. I clone the simplelog repo to my local machine so they are both at my root directory. Now to add simplelog to my project I must `C:\simplelog yt link` `C:\ytexe yt link simplelog` at this point I have added simplelog calls into my code and run `yt build`, which fails, because the simplelog module has not been added to my module.json, nor does it show up in a `yt ls` command. I must run `yt install simplelog`. This may be expected behaviour, and maybe we dont want to change it, but at the very least it makes for a bad user experience. ## Solution 1) have a `yt link <absolute file path>` command that takes care of all these steps or 2) have `c:\ytexe yt link ytlib` add the module to the module.json or otherwise make it obvious that it is added to the project, currently there is no feedback. or 3) when a user runs `c:\ytexe yt link ytlib` give them a feedback message telling them they need to `yt install ytlib` to finish adding the module. I think this solution is the least optimum because it requires 3 steps instead of 1, but it is the miminum required for user interaction.
ARMmbed/yotta
diff --git a/yotta/test/cli/build.py b/yotta/test/cli/build.py index bbd0dbb..3ff37bf 100644 --- a/yotta/test/cli/build.py +++ b/yotta/test/cli/build.py @@ -6,17 +6,14 @@ # standard library modules, , , import unittest -import os -import tempfile import subprocess import copy import re import datetime # internal modules: -from yotta.lib.fsutils import mkDirP, rmRf -from yotta.lib.detect import systemDefaultTarget from . import cli +from . import util Test_Complex = { 'module.json': '''{ @@ -87,59 +84,7 @@ int main(){ ''' } - -Test_Trivial_Lib = { -'module.json':'''{ - "name": "test-trivial-lib", - "version": "0.0.2", - "description": "Module to test trivial lib compilation", - "licenses": [ - { - "url": "https://spdx.org/licenses/Apache-2.0", - "type": "Apache-2.0" - } - ], - "dependencies": { - } -}''', - -'test-trivial-lib/lib.h': ''' -int foo(); -''', - -'source/lib.c':''' -#include "test-trivial-lib/lib.h" - -int foo(){ - return 7; -} -''' -} - -Test_Trivial_Exe = { -'module.json':'''{ - "name": "test-trivial-exe", - "version": "0.0.2", - "description": "Module to test trivial exe compilation", - "licenses": [ - { - "url": "https://spdx.org/licenses/Apache-2.0", - "type": "Apache-2.0" - } - ], - "dependencies": { - }, - "bin":"./source" -}''', - -'source/lib.c':''' -int main(){ - return 0; -} -''' -} - -Test_Build_Info = copy.copy(Test_Trivial_Exe) +Test_Build_Info = copy.copy(util.Test_Trivial_Exe) Test_Build_Info['source/lib.c'] = ''' #include "stdio.h" #include YOTTA_BUILD_INFO_HEADER @@ -202,84 +147,66 @@ int foo(){ 'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }' } -def isWindows(): - # can't run tests that hit github without an authn token - return os.name == 'nt' - class TestCLIBuild(unittest.TestCase): - def writeTestFiles(self, files, add_space_in_path=False): - test_dir = tempfile.mkdtemp() - if add_space_in_path: - test_dir = test_dir + ' spaces in path' - - for path, contents in files.items(): - path_dir, 
file_name = os.path.split(path) - path_dir = os.path.join(test_dir, path_dir) - mkDirP(path_dir) - with open(os.path.join(path_dir, file_name), 'w') as f: - f.write(contents) - return test_dir - - - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_buildTrivialLib(self): - test_dir = self.writeTestFiles(Test_Trivial_Lib) + test_dir = util.writeTestFiles(util.Test_Trivial_Lib) - stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) + stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_buildTrivialExe(self): - test_dir = self.writeTestFiles(Test_Trivial_Exe) + test_dir = util.writeTestFiles(util.Test_Trivial_Exe) - stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) + stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_buildComplex(self): - test_dir = self.writeTestFiles(Test_Complex) + test_dir = util.writeTestFiles(Test_Complex) - stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) + stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_buildComplexSpaceInPath(self): - test_dir = self.writeTestFiles(Test_Complex, True) + test_dir = 
util.writeTestFiles(Test_Complex, True) - stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) + stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_buildTests(self): - test_dir = self.writeTestFiles(Test_Tests, True) - stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) - stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir) + test_dir = util.writeTestFiles(Test_Tests, True) + stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir) + stdout = self.runCheckCommand(['--target', util.nativeTarget(), 'test'], test_dir) self.assertIn('test-a', stdout) self.assertIn('test-c', stdout) self.assertIn('test-d', stdout) self.assertIn('test-e', stdout) self.assertIn('test-f', stdout) self.assertIn('test-g', stdout) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_buildInfo(self): - test_dir = self.writeTestFiles(Test_Build_Info, True) + test_dir = util.writeTestFiles(Test_Build_Info, True) # commit all the test files to git so that the VCS build info gets # defined: subprocess.check_call(['git', 'init', '-q'], cwd=test_dir) subprocess.check_call(['git', 'add', '.'], cwd=test_dir) subprocess.check_call(['git', 'commit', '-m', 'test build info automated commit', '-q'], cwd=test_dir) - self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) + self.runCheckCommand(['--target', util.nativeTarget(), 'build'], test_dir) build_time = datetime.datetime.utcnow() - output = subprocess.check_output(['./build/' + systemDefaultTarget().split(',')[0] + 
'/source/test-trivial-exe'], cwd=test_dir).decode() + output = subprocess.check_output(['./build/' + util.nativeTarget().split(',')[0] + '/source/test-trivial-exe'], cwd=test_dir).decode() self.assertIn('vcs clean: 1', output) # check build timestamp diff --git a/yotta/test/cli/cli.py b/yotta/test/cli/cli.py index 3017aa2..541cedb 100644 --- a/yotta/test/cli/cli.py +++ b/yotta/test/cli/cli.py @@ -24,6 +24,10 @@ def run(arguments, cwd='.'): stdin = subprocess.PIPE ) out, err = child.communicate() + # no command should ever produce a traceback: + if 'traceback' in (out.decode('utf-8')+err.decode('utf-8')).lower(): + print(out+err) + assert(False) return out.decode('utf-8'), err.decode('utf-8'), child.returncode diff --git a/yotta/test/cli/link.py b/yotta/test/cli/link.py new file mode 100644 index 0000000..eddb5c5 --- /dev/null +++ b/yotta/test/cli/link.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 +# See LICENSE file for details. + + +# standard library modules, , , +import unittest +import os +import tempfile + +# internal modules: +from yotta.lib.folders import globalInstallDirectory + +from . import cli +from . 
import util + +Test_Target = 'x86-linux-native' + +class TestCLILink(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.prefix_dir = tempfile.mkdtemp() + os.environ['YOTTA_PREFIX'] = cls.prefix_dir + + @classmethod + def tearDownClass(cls): + util.rmRf(cls.prefix_dir) + cls.prefix_dir = None + + def testLink(self): + linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True) + + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link'], cwd=linked_in_module) + self.assertEqual(statuscode, 0) + self.assertTrue(os.path.exists(os.path.join(globalInstallDirectory(), 'test-trivial-lib'))) + + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_module) + self.assertIn('missing', stdout+stderr) + + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link', 'test-trivial-lib'], cwd=test_module) + self.assertEqual(statuscode, 0) + self.assertNotIn('broken', stdout+stderr) + + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'list'], cwd=test_module) + self.assertNotIn('missing', stdout+stderr) + + util.rmRf(test_module) + util.rmRf(linked_in_module) + + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet") + def testLinkedBuild(self): + linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True) + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module) + self.assertEqual(statuscode, 0) + + util.rmRf(test_module) + 
util.rmRf(linked_in_module) + + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet") + def testLinkedReBuild(self): + # test that changing which module is linked triggers a re-build + linked_in_module_1 = util.writeTestFiles(util.Test_Trivial_Lib, True) + linked_in_module_2 = util.writeTestFiles(util.Test_Trivial_Lib, True) + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module_1) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module) + self.assertEqual(statuscode, 0) + + # check that rebuild is no-op + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module) + self.assertIn('no work to do', stdout+stderr) + self.assertEqual(statuscode, 0) + + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module_2) + self.assertEqual(statuscode, 0) + + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'build'], cwd=test_module) + self.assertNotIn('no work to do', stdout+stderr) + self.assertEqual(statuscode, 0) + + util.rmRf(test_module) + util.rmRf(linked_in_module_1) + util.rmRf(linked_in_module_2) + + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet") + def testTargetLinkedBuild(self): + linked_in_target = util.writeTestFiles(util.getNativeTargetDescription(), True) + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep_Preinstalled, True) + + stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target'], cwd=linked_in_target) + self.assertEqual(statuscode, 0) + 
stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target', 'test-native-target'], cwd=test_module) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'build'], cwd=test_module) + self.assertEqual(statuscode, 0) + + util.rmRf(test_module) + util.rmRf(linked_in_target) + diff --git a/yotta/test/cli/outdated.py b/yotta/test/cli/outdated.py index 15fbed4..be8eb4d 100644 --- a/yotta/test/cli/outdated.py +++ b/yotta/test/cli/outdated.py @@ -6,11 +6,9 @@ # standard library modules, , , import unittest -import os -import tempfile # internal modules: -from yotta.lib.fsutils import mkDirP, rmRf +from . import util from . import cli Test_Outdated = { @@ -42,30 +40,17 @@ int foo(){ } class TestCLIOutdated(unittest.TestCase): - def writeTestFiles(self, files, add_space_in_path=False): - test_dir = tempfile.mkdtemp() - if add_space_in_path: - test_dir = test_dir + ' spaces in path' - - for path, contents in files.items(): - path_dir, file_name = os.path.split(path) - path_dir = os.path.join(test_dir, path_dir) - mkDirP(path_dir) - with open(os.path.join(path_dir, file_name), 'w') as f: - f.write(contents) - return test_dir - def test_outdated(self): - path = self.writeTestFiles(Test_Outdated, True) + path = util.writeTestFiles(Test_Outdated, True) stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path) self.assertNotEqual(statuscode, 0) self.assertIn('test-testing-dummy', stdout + stderr) - rmRf(path) + util.rmRf(path) def test_notOutdated(self): - path = self.writeTestFiles(Test_Outdated, True) + path = util.writeTestFiles(Test_Outdated, True) stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path) self.assertEqual(statuscode, 0) @@ -74,4 +59,4 @@ class TestCLIOutdated(unittest.TestCase): self.assertEqual(statuscode, 0) self.assertNotIn('test-testing-dummy', stdout + stderr) - rmRf(path) + util.rmRf(path) diff --git 
a/yotta/test/cli/test.py b/yotta/test/cli/test.py index 6a243d6..ccec431 100644 --- a/yotta/test/cli/test.py +++ b/yotta/test/cli/test.py @@ -6,15 +6,12 @@ # standard library modules, , , import unittest -import os -import tempfile import copy # internal modules: -from yotta.lib.fsutils import mkDirP, rmRf from yotta.lib.detect import systemDefaultTarget from . import cli - +from . import util Test_Tests = { 'module.json':'''{ @@ -103,26 +100,10 @@ Test_Fitler_NotFound['module.json'] = '''{ } }''' -def isWindows(): - return os.name == 'nt' - class TestCLITest(unittest.TestCase): - def writeTestFiles(self, files, add_space_in_path=False): - test_dir = tempfile.mkdtemp() - if add_space_in_path: - test_dir = test_dir + ' spaces in path' - - for path, contents in files.items(): - path_dir, file_name = os.path.split(path) - path_dir = os.path.join(test_dir, path_dir) - mkDirP(path_dir) - with open(os.path.join(path_dir, file_name), 'w') as f: - f.write(contents) - return test_dir - - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_tests(self): - test_dir = self.writeTestFiles(Test_Tests, True) + test_dir = util.writeTestFiles(Test_Tests, True) output = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir) output = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir) self.assertIn('test-a passed', output) @@ -131,17 +112,17 @@ class TestCLITest(unittest.TestCase): self.assertIn('test-e passed', output) self.assertIn('test-f passed', output) self.assertIn('test-g passed', output) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_testOutputFilterPassing(self): - test_dir = self.writeTestFiles(Test_Fitler_Pass, True) + test_dir = 
util.writeTestFiles(Test_Fitler_Pass, True) stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_testOutputFilterFailing(self): - test_dir = self.writeTestFiles(Test_Fitler_Fail, True) + test_dir = util.writeTestFiles(Test_Fitler_Fail, True) stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir) if statuscode == 0: print(stdout) @@ -153,17 +134,17 @@ class TestCLITest(unittest.TestCase): self.assertIn('test-f failed', '%s %s' % (stdout, stderr)) self.assertIn('test-g failed', '%s %s' % (stdout, stderr)) self.assertNotEqual(statuscode, 0) - rmRf(test_dir) + util.rmRf(test_dir) - @unittest.skipIf(isWindows(), "can't build natively on windows yet") + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on windows yet") def test_testOutputFilterNotFound(self): - test_dir = self.writeTestFiles(Test_Fitler_NotFound, True) + test_dir = util.writeTestFiles(Test_Fitler_NotFound, True) stdout, stderr, statuscode = cli.run(['--target', systemDefaultTarget(), 'test'], cwd=test_dir) if statuscode == 0: print(stdout) print(stderr) self.assertNotEqual(statuscode, 0) - rmRf(test_dir) + util.rmRf(test_dir) def runCheckCommand(self, args, test_dir): stdout, stderr, statuscode = cli.run(args, cwd=test_dir) diff --git a/yotta/test/cli/unlink.py b/yotta/test/cli/unlink.py new file mode 100644 index 0000000..ff6eda6 --- /dev/null +++ b/yotta/test/cli/unlink.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 +# See LICENSE file for details. + + +# standard library modules, , , +import unittest +import tempfile +import os + +# internal modules: +from . import cli +from . 
import util + +Test_Target = 'x86-linux-native' + +class TestCLIUnLink(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.prefix_dir = tempfile.mkdtemp() + os.environ['YOTTA_PREFIX'] = cls.prefix_dir + + @classmethod + def tearDownClass(cls): + util.rmRf(cls.prefix_dir) + cls.prefix_dir = None + + def testUnlinkNonexistentModule(self): + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink', 'doesnotexist'], cwd=test_module) + self.assertNotEqual(statuscode, 0) + util.rmRf(test_module) + + def testUnlinkNonexistentTarget(self): + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink-target', 'doesnotexist'], cwd=test_module) + self.assertNotEqual(statuscode, 0) + util.rmRf(test_module) + + def testUnlinkNotLinkedModuleGlobally(self): + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink'], cwd=test_module) + self.assertNotEqual(statuscode, 0) + util.rmRf(test_module) + + def testUnlinkNotLinkedTargetGlobally(self): + test_target = util.writeTestFiles(util.getNativeTargetDescription(), True) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink'], cwd=test_target) + self.assertNotEqual(statuscode, 0) + util.rmRf(test_target) + + def testUnlinkModuleGlobally(self): + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link'], cwd=test_module) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink'], cwd=test_module) + self.assertEqual(statuscode, 0) + util.rmRf(test_module) + + def testUnlinkTargetGlobally(self): + test_target = util.writeTestFiles(util.getNativeTargetDescription(), True) 
+ stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'link-target'], cwd=test_target) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', Test_Target, '--plain', 'unlink-target'], cwd=test_target) + self.assertEqual(statuscode, 0) + util.rmRf(test_target) + + def testUnlinkModule(self): + linked_in_module = util.writeTestFiles(util.Test_Trivial_Lib, True) + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep, True) + + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link'], cwd=linked_in_module) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'link', 'test-trivial-lib'], cwd=test_module) + self.assertEqual(statuscode, 0) + self.assertTrue(os.path.exists(os.path.join(test_module, 'yotta_modules', 'test-trivial-lib'))) + stdout, stderr, statuscode = cli.run(['-t', util.nativeTarget(), '--plain', 'unlink', 'test-trivial-lib'], cwd=test_module) + self.assertEqual(statuscode, 0) + self.assertTrue(not os.path.exists(os.path.join(test_module, 'yotta_modules', 'test-trivial-lib'))) + + util.rmRf(test_module) + util.rmRf(linked_in_module) + + @unittest.skipIf(not util.canBuildNatively(), "can't build natively on this platform yet") + def testUnlinkTarget(self): + linked_in_target = util.writeTestFiles(util.getNativeTargetDescription(), True) + test_module = util.writeTestFiles(util.Test_Testing_Trivial_Lib_Dep_Preinstalled, True) + + stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target'], cwd=linked_in_target) + self.assertEqual(statuscode, 0) + stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 'link-target', 'test-native-target'], cwd=test_module) + self.assertEqual(statuscode, 0) + self.assertTrue(os.path.exists(os.path.join(test_module, 'yotta_targets', 'test-native-target'))) + stdout, stderr, statuscode = cli.run(['-t', 'test-native-target', '--plain', 
'unlink-target', 'test-native-target'], cwd=test_module) + self.assertEqual(statuscode, 0) + self.assertTrue(not os.path.exists(os.path.join(test_module, 'yotta_targets', 'test-native-target'))) + + util.rmRf(test_module) + util.rmRf(linked_in_target) + + diff --git a/yotta/test/cli/update.py b/yotta/test/cli/update.py index 4906fab..8581689 100644 --- a/yotta/test/cli/update.py +++ b/yotta/test/cli/update.py @@ -6,12 +6,10 @@ # standard library modules, , , import unittest -import os -import tempfile # internal modules: -from yotta.lib.fsutils import mkDirP, rmRf from . import cli +from . import util Test_Outdated = { 'module.json':'''{ @@ -42,39 +40,26 @@ int foo(){ } class TestCLIUpdate(unittest.TestCase): - def writeTestFiles(self, files, add_space_in_path=False): - test_dir = tempfile.mkdtemp() - if add_space_in_path: - test_dir = test_dir + ' spaces in path' - - for path, contents in files.items(): - path_dir, file_name = os.path.split(path) - path_dir = os.path.join(test_dir, path_dir) - mkDirP(path_dir) - with open(os.path.join(path_dir, file_name), 'w') as f: - f.write(contents) - return test_dir - def test_update(self): - path = self.writeTestFiles(Test_Outdated, True) + path = util.writeTestFiles(Test_Outdated, True) stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'update'], cwd=path) self.assertEqual(statuscode, 0) self.assertIn('download test-testing-dummy', stdout + stderr) - rmRf(path) + util.rmRf(path) def test_updateExplicit(self): - path = self.writeTestFiles(Test_Outdated, True) + path = util.writeTestFiles(Test_Outdated, True) stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'update', 'test-testing-dummy'], cwd=path) self.assertEqual(statuscode, 0) self.assertIn('download test-testing-dummy', stdout + stderr) - rmRf(path) + util.rmRf(path) def test_updateNothing(self): - path = self.writeTestFiles(Test_Outdated, True) + path = util.writeTestFiles(Test_Outdated, True) stdout, stderr, statuscode = cli.run(['-t', 
'x86-linux-native', 'up'], cwd=path) self.assertEqual(statuscode, 0) @@ -84,4 +69,4 @@ class TestCLIUpdate(unittest.TestCase): self.assertEqual(statuscode, 0) self.assertNotIn('download test-testing-dummy', stdout + stderr) - rmRf(path) + util.rmRf(path) diff --git a/yotta/test/cli/util.py b/yotta/test/cli/util.py new file mode 100644 index 0000000..553619b --- /dev/null +++ b/yotta/test/cli/util.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# Copyright 2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 +# See LICENSE file for details. + +# standard library modules, , , +import tempfile +import os +import copy + +# internal modules: +import yotta.lib.fsutils as fsutils +from yotta.lib.detect import systemDefaultTarget + +# some simple example module definitions that can be re-used by multiple tests: +Test_Trivial_Lib = { +'module.json':'''{ + "name": "test-trivial-lib", + "version": "1.0.0", + "description": "Module to test trivial lib compilation", + "license": "Apache-2.0", + "dependencies": { + } +}''', + +'test-trivial-lib/lib.h': ''' +int foo(); +''', + +'source/lib.c':''' +#include "test-trivial-lib/lib.h" +int foo(){ return 7; } +''' +} + +Test_Trivial_Exe = { +'module.json':'''{ + "name": "test-trivial-exe", + "version": "1.0.0", + "description": "Module to test trivial exe compilation", + "license": "Apache-2.0", + "dependencies": { + }, + "bin":"./source" +}''', + +'source/lib.c':''' +int main(){ return 0; } +''' +} + +Test_Testing_Trivial_Lib_Dep = { +'module.json':'''{ + "name": "test-simple-module", + "version": "1.0.0", + "description": "a simple test module", + "author": "Someone Somewhere <[email protected]>", + "license": "Apache-2.0", + "dependencies": { + "test-trivial-lib": "^1.0.0" + } +} +''', + +'test-simple-module/simple.h': ''' +int simple(); +''', + +'source/lib.c':''' +#include "test-simple-module/simple.h" +int simple(){ return 123; } +''' +} + +Test_Testing_Trivial_Lib_Dep_Preinstalled = 
copy.copy(Test_Testing_Trivial_Lib_Dep) +for k, v in Test_Trivial_Lib.items(): + Test_Testing_Trivial_Lib_Dep_Preinstalled['yotta_modules/test-trivial-lib/' + k] = v + + +def getNativeTargetDescription(): + # actually returns a trivial target which inherits from the native target + native_target = nativeTarget() + if ',' in native_target: + native_target = native_target[:native_target.find(',')] + return { + 'target.json':'''{ + "name": "test-native-target", + "version": "1.0.0", + "license": "Apache-2.0", + "inherits": { + "%s": "*" + } + } + ''' % native_target + } + + +def writeTestFiles(files, add_space_in_path=False): + ''' write a dictionary of filename:contents into a new temporary directory + ''' + test_dir = tempfile.mkdtemp() + if add_space_in_path: + test_dir = test_dir + ' spaces in path' + + for path, contents in files.items(): + path_dir, file_name = os.path.split(path) + path_dir = os.path.join(test_dir, path_dir) + fsutils.mkDirP(path_dir) + with open(os.path.join(path_dir, file_name), 'w') as f: + f.write(contents) + return test_dir + +def isWindows(): + # can't run tests that hit github without an authn token + return os.name == 'nt' + +def canBuildNatively(): + return not isWindows() + +def nativeTarget(): + assert(canBuildNatively()) + return systemDefaultTarget() + +#expose rmRf for convenience +rmRf = fsutils.rmRf diff --git a/yotta/test/config.py b/yotta/test/config.py index 8c7b417..8c67192 100644 --- a/yotta/test/config.py +++ b/yotta/test/config.py @@ -7,12 +7,11 @@ import unittest import copy import os -import tempfile import logging # internal modules: -from yotta.lib.fsutils import mkDirP, rmRf from yotta.lib import validate +from .cli import util logging.basicConfig( level=logging.ERROR @@ -78,19 +77,6 @@ Test_Module_Config_Ignored['module.json'] = '''{ }''' class ConfigTest(unittest.TestCase): - def writeTestFiles(self, files, add_space_in_path=False): - test_dir = tempfile.mkdtemp() - if add_space_in_path: - test_dir = test_dir + ' 
spaces in path' - - for path, contents in files.items(): - path_dir, file_name = os.path.split(path) - path_dir = os.path.join(test_dir, path_dir) - mkDirP(path_dir) - with open(os.path.join(path_dir, file_name), 'w') as f: - f.write(contents) - return test_dir - def setUp(self): self.restore_cwd = os.getcwd() @@ -98,7 +84,7 @@ class ConfigTest(unittest.TestCase): os.chdir(self.restore_cwd) def test_targetConfigMerge(self): - test_dir = self.writeTestFiles(Test_Target_Config_Merge, True) + test_dir = util.writeTestFiles(Test_Target_Config_Merge, True) os.chdir(test_dir) c = validate.currentDirectoryModule() @@ -118,10 +104,10 @@ class ConfigTest(unittest.TestCase): self.assertEqual(merged_config['bar']['d'], "def") os.chdir(self.restore_cwd) - rmRf(test_dir) + util.rmRf(test_dir) def test_targetAppConfigMerge(self): - test_dir = self.writeTestFiles(Test_Target_Config_Merge_App, True) + test_dir = util.writeTestFiles(Test_Target_Config_Merge_App, True) os.chdir(test_dir) c = validate.currentDirectoryModule() @@ -144,10 +130,10 @@ class ConfigTest(unittest.TestCase): self.assertEqual(merged_config['new'], 123) os.chdir(self.restore_cwd) - rmRf(test_dir) + util.rmRf(test_dir) def test_moduleConfigIgnored(self): - test_dir = self.writeTestFiles(Test_Module_Config_Ignored, True) + test_dir = util.writeTestFiles(Test_Module_Config_Ignored, True) os.chdir(test_dir) c = validate.currentDirectoryModule() @@ -157,5 +143,5 @@ class ConfigTest(unittest.TestCase): self.assertNotIn("new", merged_config) os.chdir(self.restore_cwd) - rmRf(test_dir) + util.rmRf(test_dir) diff --git a/yotta/test/ignores.py b/yotta/test/ignores.py index 3a5f8e9..16832a8 100644 --- a/yotta/test/ignores.py +++ b/yotta/test/ignores.py @@ -8,13 +8,12 @@ # standard library modules, , , import unittest import os -import tempfile # internal modules: -from yotta.lib.fsutils import mkDirP, rmRf from yotta.lib.detect import systemDefaultTarget from yotta.lib import component from .cli import cli +from .cli 
import util Test_Files = { '.yotta_ignore': ''' @@ -115,24 +114,14 @@ def isWindows(): # can't run tests that hit github without an authn token return os.name == 'nt' -def writeTestFiles(files): - test_dir = tempfile.mkdtemp() - for path, contents in files.items(): - path_dir, file_name = os.path.split(path) - path_dir = os.path.join(test_dir, path_dir) - mkDirP(path_dir) - with open(os.path.join(path_dir, file_name), 'w') as f: - f.write(contents) - return test_dir - class TestPackIgnores(unittest.TestCase): @classmethod def setUpClass(cls): - cls.test_dir = writeTestFiles(Test_Files) + cls.test_dir = util.writeTestFiles(Test_Files) @classmethod def tearDownClass(cls): - rmRf(cls.test_dir) + util.rmRf(cls.test_dir) def test_absolute_ignores(self): c = component.Component(self.test_dir) @@ -158,7 +147,7 @@ class TestPackIgnores(unittest.TestCase): self.assertTrue(c.ignores('test/someothertest/alsoignored.c')) def test_default_ignores(self): - default_test_dir = writeTestFiles(Default_Test_Files) + default_test_dir = util.writeTestFiles(Default_Test_Files) c = component.Component(default_test_dir) self.assertTrue(c.ignores('.something.c.swp')) self.assertTrue(c.ignores('.something.c~')) @@ -173,7 +162,7 @@ class TestPackIgnores(unittest.TestCase): self.assertTrue(c.ignores('build')) self.assertTrue(c.ignores('.yotta.json')) - rmRf(default_test_dir) + util.rmRf(default_test_dir) def test_comments(self): c = component.Component(self.test_dir)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 4 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc cmake ninja-build" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
argcomplete==0.9.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 colorama==0.3.9 cryptography==44.0.2 Deprecated==1.2.18 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work future==1.0.0 hgapi==1.7.4 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work intelhex==2.3.0 intervaltree==3.1.0 Jinja2==2.11.3 jsonpointer==2.0 jsonschema==2.6.0 MarkupSafe==3.0.2 mbed_test_wrapper==0.0.3 packaging @ file:///croot/packaging_1734472117206/work pathlib==1.0.1 pluggy @ file:///croot/pluggy_1733169602837/work project-generator-definitions==0.2.46 project_generator==0.8.17 pycparser==2.22 pyelftools==0.23 PyGithub==1.54.1 PyJWT==1.7.1 pyocd==0.15.0 pytest @ file:///croot/pytest_1738938843180/work pyusb==1.3.1 PyYAML==3.13 requests==2.32.3 semantic-version==2.10.0 six==1.17.0 sortedcontainers==2.4.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work urllib3==2.3.0 valinor==0.0.15 websocket-client==1.8.0 wrapt==1.17.2 xmltodict==0.14.2 -e git+https://github.com/ARMmbed/yotta.git@852c1e498fbb12938fa28aa388cf2b2b650508fe#egg=yotta
name: yotta channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argcomplete==0.9.0 - argparse==1.4.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - colorama==0.3.9 - cryptography==44.0.2 - deprecated==1.2.18 - future==1.0.0 - hgapi==1.7.4 - idna==3.10 - intelhex==2.3.0 - intervaltree==3.1.0 - jinja2==2.11.3 - jsonpointer==2.0 - jsonschema==2.6.0 - markupsafe==3.0.2 - mbed-test-wrapper==0.0.3 - pathlib==1.0.1 - project-generator==0.8.17 - project-generator-definitions==0.2.46 - pycparser==2.22 - pyelftools==0.23 - pygithub==1.54.1 - pyjwt==1.7.1 - pyocd==0.15.0 - pyusb==1.3.1 - pyyaml==3.13 - requests==2.32.3 - semantic-version==2.10.0 - six==1.17.0 - sortedcontainers==2.4.0 - urllib3==2.3.0 - valinor==0.0.15 - websocket-client==1.8.0 - wrapt==1.17.2 - xmltodict==0.14.2 prefix: /opt/conda/envs/yotta
[ "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkModuleGlobally", "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNonexistentModule", "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkTarget", "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkTargetGlobally" ]
[ "yotta/test/cli/build.py::TestCLIBuild::test_buildComplex", "yotta/test/cli/build.py::TestCLIBuild::test_buildComplexSpaceInPath", "yotta/test/cli/build.py::TestCLIBuild::test_buildInfo", "yotta/test/cli/build.py::TestCLIBuild::test_buildTests", "yotta/test/cli/build.py::TestCLIBuild::test_buildTrivialExe", "yotta/test/cli/build.py::TestCLIBuild::test_buildTrivialLib", "yotta/test/cli/link.py::TestCLILink::testLink", "yotta/test/cli/link.py::TestCLILink::testLinkedBuild", "yotta/test/cli/link.py::TestCLILink::testLinkedReBuild", "yotta/test/cli/link.py::TestCLILink::testTargetLinkedBuild", "yotta/test/cli/outdated.py::TestCLIOutdated::test_notOutdated", "yotta/test/cli/outdated.py::TestCLIOutdated::test_outdated", "yotta/test/cli/test.py::TestCLITest::test_testOutputFilterFailing", "yotta/test/cli/test.py::TestCLITest::test_testOutputFilterNotFound", "yotta/test/cli/test.py::TestCLITest::test_testOutputFilterPassing", "yotta/test/cli/test.py::TestCLITest::test_tests", "yotta/test/cli/update.py::TestCLIUpdate::test_update", "yotta/test/cli/update.py::TestCLIUpdate::test_updateExplicit", "yotta/test/cli/update.py::TestCLIUpdate::test_updateNothing", "yotta/test/config.py::ConfigTest::test_moduleConfigIgnored", "yotta/test/config.py::ConfigTest::test_targetAppConfigMerge", "yotta/test/config.py::ConfigTest::test_targetConfigMerge", "yotta/test/ignores.py::TestPackIgnores::test_build", "yotta/test/ignores.py::TestPackIgnores::test_test" ]
[ "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkModule", "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNonexistentTarget", "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNotLinkedModuleGlobally", "yotta/test/cli/unlink.py::TestCLIUnLink::testUnlinkNotLinkedTargetGlobally", "yotta/test/ignores.py::TestPackIgnores::test_absolute_ignores", "yotta/test/ignores.py::TestPackIgnores::test_comments", "yotta/test/ignores.py::TestPackIgnores::test_default_ignores", "yotta/test/ignores.py::TestPackIgnores::test_glob_ignores", "yotta/test/ignores.py::TestPackIgnores::test_relative_ignores" ]
[]
Apache License 2.0
304
rackerlabs__lambda-uploader-35
c40923a6982a0a3d4fd41b135a4f9b7e97b74f90
2015-11-20 15:06:36
c40923a6982a0a3d4fd41b135a4f9b7e97b74f90
diff --git a/README.md b/README.md index 332bb35..bf0a3bb 100644 --- a/README.md +++ b/README.md @@ -29,11 +29,7 @@ Example lambda.json file: "handler": "function.lambda_handler", "role": "arn:aws:iam::00000000000:role/lambda_basic_execution", "requirements": ["pygithub"], - "ignore": [ - "circle.yml", - ".git", - "*.pyc" - ], + "ignore": ["circle.yml"], "timeout": 30, "memory": 512 } @@ -57,6 +53,11 @@ To specify an alternative, prexisting virtualenv use the `--virtualenv` paramete lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv ``` +To omit using a virtualenv use the `--no-virtualenv` parameter. +```shell +lambda-uploader --no-virtualenv +``` + If you would prefer to upload another way you can tell the uploader to ignore the upload. This will create a package and leave it in the project directory. ```shell diff --git a/README.rst b/README.rst index eaa1676..a9bf569 100644 --- a/README.rst +++ b/README.rst @@ -42,11 +42,7 @@ Example lambda.json file: "handler": "function.lambda_handler", "role": "arn:aws:iam::00000000000:role/lambda_basic_execution", "requirements": ["pygithub"], - "ignore": [ - "circle.yml", - ".git", - "*.pyc" - ], + "ignore": ["circle.yml"], "timeout": 30, "memory": 512 } @@ -75,6 +71,12 @@ To specify an alternative, prexisting virtualenv use the lambda-uploader --virtualenv=~/.virtualenv/my_custom_virtualenv +To omit using a virtualenv use the ``--no-virtualenv`` parameter. + +.. code:: shell + + lambda-uploader --no-virtualenv + If you would prefer to upload another way you can tell the uploader to ignore the upload. This will create a package and leave it in the project directory. 
diff --git a/example/lambda.json b/example/lambda.json index bdf786a..7148a5b 100644 --- a/example/lambda.json +++ b/example/lambda.json @@ -5,11 +5,7 @@ "handler": "function.lambda_handler", "role": "arn:aws:iam::00000000000:role/lambda_basic_execution", "requirements": ["Jinja2==2.8"], - "ignore": [ - "circle.yml", - ".git", - "*.pyc" - ], + "ignore": ["circle.yml"], "timeout": 30, "memory": 512 } diff --git a/lambda_uploader/package.py b/lambda_uploader/package.py index 1bb0089..ccbe79e 100644 --- a/lambda_uploader/package.py +++ b/lambda_uploader/package.py @@ -27,37 +27,24 @@ ZIPFILE_NAME = 'lambda_function.zip' def build_package(path, requirements, virtualenv=None, ignore=[]): - pkg = Package(path, virtualenv) + pkg = Package(path, virtualenv, requirements) pkg.clean_workspace() pkg.clean_zipfile() pkg.prepare_workspace() - if virtualenv: - if not os.path.isdir(virtualenv): - raise Exception("supplied virtualenv %s not found" % virtualenv) - LOG.info("Using existing virtualenv found in %s" % virtualenv) - else: - LOG.info('Building new virtualenv and installing requirements') - pkg.prepare_virtualenv() - pkg.install_requirements(requirements) + pkg.prepare_virtualenv() pkg.package(ignore) return pkg class Package(object): - def __init__(self, path, virtualenv=None): + def __init__(self, path, virtualenv=None, requirements=[]): self._path = path self._temp_workspace = os.path.join(path, TEMP_WORKSPACE_NAME) self.zip_file = os.path.join(path, ZIPFILE_NAME) - - if virtualenv: - self._pkg_venv = virtualenv - else: - self._pkg_venv = os.path.join(self._temp_workspace, 'venv') - self._venv_pip = 'bin/pip' - if sys.platform == 'win32' or sys.platform == 'cygwin': - self._venv_pip = 'Scripts\pip.exe' + self._virtualenv = virtualenv + self._requirements = requirements def clean_workspace(self): if os.path.isdir(self._temp_workspace): @@ -72,21 +59,59 @@ class Package(object): os.mkdir(self._temp_workspace) def prepare_virtualenv(self): - proc = Popen(["virtualenv", 
self._pkg_venv], stdout=PIPE, stderr=PIPE) - stdout, stderr = proc.communicate() - LOG.debug("Virtualenv stdout: %s" % stdout) - LOG.debug("Virtualenv stderr: %s" % stderr) + requirements_exist = \ + self._requirements or os.path.isfile("requirements.txt") + if self._virtualenv and self._virtualenv is not False: + if not os.path.isdir(self._virtualenv): + raise Exception("virtualenv %s not found" % self._virtualenv) + LOG.info("Using existing virtualenv at %s" % self._virtualenv) + + # use supplied virtualenv path + self._pkg_venv = self._virtualenv + elif self._virtualenv is None and requirements_exist: + LOG.info('Building new virtualenv and installing requirements') + self.build_new_virtualenv() + self.install_requirements() + elif self._virtualenv is None and not requirements_exist: + LOG.info('No requirements found, so no virtualenv will be made') + self._pkg_venv = False + elif self._virtualenv is False: + LOG.info('Virtualenv has been omitted by supplied flag') + self._pkg_venv = False + else: + raise Exception('Cannot determine what to do about virtualenv') - if proc.returncode is not 0: - raise Exception('virtualenv returned unsuccessfully') + def build_new_virtualenv(self): + if self._virtualenv is None: + # virtualenv was "None" which means "do default" + self._pkg_venv = os.path.join(self._temp_workspace, 'venv') + self._venv_pip = 'bin/pip' + if sys.platform == 'win32' or sys.platform == 'cygwin': + self._venv_pip = 'Scripts\pip.exe' + + proc = Popen(["virtualenv", self._pkg_venv], + stdout=PIPE, stderr=PIPE) + stdout, stderr = proc.communicate() + LOG.debug("Virtualenv stdout: %s" % stdout) + LOG.debug("Virtualenv stderr: %s" % stderr) + + if proc.returncode is not 0: + raise Exception('virtualenv returned unsuccessfully') + + else: + raise Exception('cannot build a new virtualenv when asked to omit') + + def install_requirements(self): + if not hasattr(self, '_pkg_venv'): + err = 'Must call build_new_virtualenv before install_requirements' + raise 
Exception(err) - def install_requirements(self, requirements): cmd = None - if requirements: + if self._requirements: LOG.debug("Installing requirements found %s in config" - % requirements) + % self._requirements) cmd = [os.path.join(self._pkg_venv, self._venv_pip), - 'install'] + requirements + 'install'] + self._requirements elif os.path.isfile("requirements.txt"): # Pip install @@ -109,18 +134,19 @@ class Package(object): # Copy site packages into package base LOG.info('Copying site packages') - site_packages = 'lib/python2.7/site-packages' - lib64_site_packages = 'lib64/python2.7/site-packages' - if sys.platform == 'win32' or sys.platform == 'cygwin': - lib64_site_packages = 'lib64\\site-packages' - site_packages = 'lib\\site-packages' - - utils.copy_tree(os.path.join(self._pkg_venv, site_packages), - package) - lib64_path = os.path.join(self._pkg_venv, lib64_site_packages) - if not os.path.islink(lib64_path): - LOG.info('Copying lib64 site packages') - utils.copy_tree(lib64_path, package) + if hasattr(self, '_pkg_venv') and self._pkg_venv: + site_packages = 'lib/python2.7/site-packages' + lib64_site_packages = 'lib64/python2.7/site-packages' + if sys.platform == 'win32' or sys.platform == 'cygwin': + lib64_site_packages = 'lib64\\site-packages' + site_packages = 'lib\\site-packages' + + utils.copy_tree(os.path.join(self._pkg_venv, site_packages), + package) + lib64_path = os.path.join(self._pkg_venv, lib64_site_packages) + if not os.path.islink(lib64_path): + LOG.info('Copying lib64 site packages') + utils.copy_tree(lib64_path, package) # Append the temp workspace to the ignore list ignore.append("^%s/*" % self._temp_workspace) diff --git a/lambda_uploader/shell.py b/lambda_uploader/shell.py index c36fe21..a62bdb5 100644 --- a/lambda_uploader/shell.py +++ b/lambda_uploader/shell.py @@ -49,9 +49,19 @@ def _execute(args): cfg = config.Config(pth, args.config, role=args.role) + if args.no_virtualenv: + # specified flag to omit entirely + venv = False + elif 
args.virtualenv: + # specified a custom virtualenv + venv = args.virtualenv + else: + # build and include virtualenv, the default + venv = None + _print('Building Package') pkg = package.build_package(pth, cfg.requirements, - args.virtualenv, cfg.ignore) + venv, cfg.ignore) if not args.no_clean: pkg.clean_workspace() @@ -101,6 +111,10 @@ def main(arv=None): parser.add_argument('--virtualenv', '-e', help='use specified virtualenv instead of making one', default=None) + parser.add_argument('--no-virtualenv', dest='no_virtualenv', + action='store_const', + help='do not create or include a virtualenv at all', + const=True) parser.add_argument('--role', dest='role', default=getenv('LAMBDA_UPLOADER_ROLE'), help=('IAM role to assign the lambda function, '
Option zip and upload only the folder contents Right now, uploading grabs a bunch of stuff I don't need (git, PyGitHub, etc). This creates a fairly large zip file, hence, larger lambda storage. Not being strong with Python, I'm guessing the extra stuff relates to using virtualenv. I don't need all those dependencies, and would prefer to pip install to the local directory. Could this be added as a new switch? Or is there something I'm missing and there's some other way for me to skip them?
rackerlabs/lambda-uploader
diff --git a/test/test_package.py b/test/test_package.py index 1a64572..a293d4f 100644 --- a/test/test_package.py +++ b/test/test_package.py @@ -1,5 +1,6 @@ import os import sys +import pytest from shutil import rmtree from os import path @@ -53,9 +54,9 @@ def test_install_requirements(): temp_workspace = path.join(TESTING_TEMP_DIR, package.TEMP_WORKSPACE_NAME) - pkg = package.Package(TESTING_TEMP_DIR) - # pkg.prepare_workspace() - pkg.install_requirements(reqs) + pkg = package.Package(TESTING_TEMP_DIR, requirements=reqs) + pkg.prepare_virtualenv() + site_packages = path.join(temp_workspace, 'venv/lib/python2.7/site-packages') if sys.platform == 'win32' or sys.platform == 'cygwin': @@ -64,9 +65,39 @@ def test_install_requirements(): assert path.isdir(path.join(site_packages, '_pytest')) +def test_default_virtualenv(): + temp_workspace = path.join(TESTING_TEMP_DIR, + package.TEMP_WORKSPACE_NAME) + pkg = package.Package(TESTING_TEMP_DIR) + pkg.prepare_virtualenv() + # ensure we picked a real venv path if using default behavior + assert pkg._pkg_venv == ("%s/venv" % temp_workspace) + + def test_existing_virtualenv(): + venv_dir = "virtualenv_test" + temp_virtualenv = path.join(TESTING_TEMP_DIR, venv_dir) + os.mkdir(temp_virtualenv) + + pkg = package.Package(TESTING_TEMP_DIR, temp_virtualenv) + pkg.prepare_virtualenv() + + assert pkg._pkg_venv == temp_virtualenv + + +def test_bad_existing_virtualenv(): pkg = package.Package(TESTING_TEMP_DIR, 'abc') - assert pkg._pkg_venv == 'abc' + with pytest.raises(Exception): + pkg.prepare_virtualenv() + + +def test_omit_virtualenv(): + pkg = package.Package(TESTING_TEMP_DIR, False) + pkg.prepare_virtualenv() + assert pkg._pkg_venv is False + + with pytest.raises(Exception): + pkg.build_new_virtualenv() def test_package():
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 5 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "tests/pksetup_data/pksetupunit1/requirements.txt", "tests/pksetup_data/pksetupunit2/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
boto3==1.1.4 botocore==1.2.11 certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 distlib==0.3.9 docutils==0.20.1 exceptiongroup==1.2.2 execnet==2.0.2 filelock==3.12.2 futures==2.2.0 importlib-metadata==6.7.0 iniconfig==2.0.0 jmespath==0.10.0 -e git+https://github.com/rackerlabs/lambda-uploader.git@c40923a6982a0a3d4fd41b135a4f9b7e97b74f90#egg=lambda_uploader packaging==24.0 platformdirs==4.0.0 pluggy==1.2.0 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.0.1 typing_extensions==4.7.1 virtualenv==20.26.6 zipp==3.15.0
name: lambda-uploader channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.1.4 - botocore==1.2.11 - coverage==7.2.7 - distlib==0.3.9 - docutils==0.20.1 - exceptiongroup==1.2.2 - execnet==2.0.2 - filelock==3.12.2 - futures==2.2.0 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - jmespath==0.10.0 - packaging==24.0 - platformdirs==4.0.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.0.1 - typing-extensions==4.7.1 - virtualenv==20.26.6 - zipp==3.15.0 prefix: /opt/conda/envs/lambda-uploader
[ "test/test_package.py::test_omit_virtualenv" ]
[ "test/test_package.py::test_install_requirements", "test/test_package.py::test_bad_existing_virtualenv" ]
[ "test/test_package.py::test_package_zip_location", "test/test_package.py::test_package_clean_workspace", "test/test_package.py::test_prepare_workspace", "test/test_package.py::test_default_virtualenv", "test/test_package.py::test_existing_virtualenv", "test/test_package.py::test_package" ]
[]
Apache License 2.0
305
mozilla__puente-47
4208bed33250cd3c9d8dacdc73dbc274ce21c85d
2015-11-20 21:37:58
4208bed33250cd3c9d8dacdc73dbc274ce21c85d
diff --git a/README.rst b/README.rst index eab331f..8bafb63 100644 --- a/README.rst +++ b/README.rst @@ -12,6 +12,8 @@ using Jinja2 templates. * merge command that merges new strings from a ``.pot`` file into locale ``.po`` files * code to collapse whitespace for Jinja2's trans block +* add pgettext and npgettext to template environment and they correctly + escape things and work the same way as Jinja2's newstyle gettext * configured using Django settings * solid documentation * solid tests diff --git a/docs/goals.rst b/docs/goals.rst index b26bd8e..b57553a 100644 --- a/docs/goals.rst +++ b/docs/goals.rst @@ -33,7 +33,9 @@ Puente does three nice things: 1. makes it easy to migrate from Tower to something you can use with Django 1.8 2. collapses whitespace in Jinja2 trans blocks -3. pulls bits from Django settings to configure extraction (e.g. Jinja2 +3. adds pgettext and npgettext to template environment that work like Jinja2's + newstyle gettext +4. pulls bits from Django settings to configure extraction (e.g. Jinja2 extensions) If you don't care about any of those things, go use Babel's pybabel command and @@ -75,9 +77,11 @@ What's different between Tower and Puente? indentation of the HTML template. That stinks because translators have to go through and fix all the translations. -3. Tower had a bunch of code to support msgctxt in extraction and gettext - calls, but Puente relies on Django's pgettext functions and Babel's - msgctxt support and that works super. +3. Tower had a bunch of code to support msgctxt in extraction and gettext calls, + but Puente relies on Django's pgettext functions and Babel's msgctxt support + and that works super... except in Jinja2 templates. Puente adds pgettext and + npgettext to the template environment and they work just like Jinja2's + newstyle gettext. 4. 
Tower had its own gettext and ngettext that marked output as safe, but Puente drops that because it's unneeded if you're using Jinja2's newstyle gettext @@ -120,7 +124,11 @@ We need to do the following before we can end Puente: https://github.com/mitsuhiko/jinja2/issues/504 -2. Puente's extract command should work more like Babel's pybabel extract +2. IN PROGRESS: Jinja2 needs to support pgettext/npgettext in templates. + + https://github.com/mitsuhiko/jinja2/issues/441 + +3. Puente's extract command should work more like Babel's pybabel extract command. The way forward is to phase Puente out for pybabel. In order to make that @@ -128,11 +136,10 @@ We need to do the following before we can end Puente: This should probably be broken up into more steps as we discover differences. -3. Ditch Puente's merge for pybabel's update? +4. Ditch Puente's merge for pybabel's update? -4. Need a nice way to use Django settings for pybabel configuration. For +5. Need a nice way to use Django settings for pybabel configuration. For example, I'd rather not have to define the list of Jinja2 extensions to use in two places. -5. Is there anything else? - +6. Is there anything else? diff --git a/docs/installation.rst b/docs/installation.rst index 4e9996c..83ba771 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -342,6 +342,24 @@ Note that ``BASE_DIR`` is the path to the project root. It's in the } +Templates +========= + +We hope you're using Jinja2's newstyle gettext and ``autoescape = True``. 
If +that's the case, then these docs will help: + +* `Jinja2 template i18n docs <http://jinja.pocoo.org/docs/dev/templates/#i18n>`_ +* `Jinja2 template newstyle docs <http://jinja.pocoo.org/docs/dev/extensions/#newstyle-gettext>`_ + +Further, Puente adds support for ``pgettext`` and ``npgettext`` in templates:: + + {{ pgettext("some context", "message string") }} + {{ npgettext("some context", "singular message", "plural message", 5) }} + + +FIXME: Expand on this and talk about escaping and ``|safe``. + + Extract and merge usage ======================= diff --git a/docs/migratingfromtower.rst b/docs/migratingfromtower.rst index 891c07b..04dc570 100644 --- a/docs/migratingfromtower.rst +++ b/docs/migratingfromtower.rst @@ -182,6 +182,7 @@ to do something like the following to switch to Puente. ('**.js', 'javascript') ] } + STANDALONE_DOMAINS = ['django'] The equivalent Puente configuration is something like this: diff --git a/puente/commands.py b/puente/commands.py index 24f07fe..16e5939 100644 --- a/puente/commands.py +++ b/puente/commands.py @@ -1,4 +1,5 @@ import os +import tempfile from subprocess import PIPE, Popen, call from tempfile import TemporaryFile @@ -11,6 +12,9 @@ from django.core.management.base import CommandError from puente.utils import monkeypatch_i18n +DEFAULT_DOMAIN_VALUE = 'all' + + def generate_options_map(): """Generate an ``options_map` to pass to ``extract_from_dir`` @@ -64,15 +68,16 @@ def generate_options_map(): ) -def extract_command(outputdir, domain_methods, text_domain, keywords, - comment_tags, base_dir, project, version, - msgid_bugs_address): +def extract_command(domain, outputdir, domain_methods, standalone_domains, + text_domain, keywords, comment_tags, base_dir, + project, version, msgid_bugs_address): """Extracts strings into .pot files :arg domain: domains to generate strings for or 'all' for all domains :arg outputdir: output dir for .pot files; usually locale/templates/LC_MESSAGES/ :arg domain_methods: DOMAIN_METHODS setting 
+ :arg standalone_domains: STANDALONE_DOMAINS setting :arg text_domain: TEXT_DOMAIN settings :arg keywords: KEYWORDS setting :arg comment_tags: COMMENT_TAGS setting @@ -91,7 +96,11 @@ def extract_command(outputdir, domain_methods, text_domain, keywords, print('Creating output dir %s ...' % outputdir) os.makedirs(outputdir) - domains = domain_methods.keys() + # Figure out what domains to extract + if domain == DEFAULT_DOMAIN_VALUE: + domains = domain_methods.keys() + else: + domains = [domain] def callback(filename, method, options): if method != 'ignore': @@ -126,15 +135,49 @@ def extract_command(outputdir, domain_methods, text_domain, keywords, with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp: write_po(fp, catalog, width=80) + not_standalone_domains = [ + dom for dom in domains + if dom not in standalone_domains + ] + + pot_files = [] + for dom in not_standalone_domains: + pot_files.append(os.path.join(outputdir, '%s.pot' % dom)) + + if len(pot_files) > 1: + pot_file = text_domain + '.pot' + print('Concatenating the non-standalone domains into %s' % pot_file) + + final_out = os.path.join(outputdir, pot_file) + + # We add final_out back on because msgcat will combine all + # specified files. We'll redirect everything back in to + # final_out in a minute. 
+ pot_files.append(final_out) + + meltingpot = tempfile.TemporaryFile() + p1 = Popen(['msgcat'] + pot_files, stdout=meltingpot) + p1.communicate() + meltingpot.seek(0) + + # w+ truncates the file first + with open(final_out, 'w+') as final: + final.write(meltingpot.read()) + + meltingpot.close() + + for dom in not_standalone_domains: + os.remove(os.path.join(outputdir, '%s.pot' % dom)) + print('Done') -def merge_command(create, base_dir, domain_methods, languages): +def merge_command(create, base_dir, standalone_domains, languages): """ :arg create: whether or not to create directories if they don't exist :arg base_dir: BASE_DIR setting - :arg domain_methods: DOMAIN_METHODS setting + :arg standalone_domains: STANDALONE_DOMAINS setting :arg languages: LANGUAGES setting """ @@ -166,9 +209,8 @@ def merge_command(create, base_dir, domain_methods, languages): if not os.path.exists(d): os.makedirs(d) - domains = domain_methods.keys() - for domain in domains: - print 'Merging %s strings to each locale...' % domain + for domain in standalone_domains: + print('Merging %s strings to each locale...' 
% domain) domain_pot = os.path.join(locale_dir, 'templates', 'LC_MESSAGES', '%s.pot' % domain) if not os.path.isfile(domain_pot): diff --git a/puente/ext.py b/puente/ext.py index 8f3e7a7..ea6714b 100644 --- a/puente/ext.py +++ b/puente/ext.py @@ -1,8 +1,28 @@ +from django.utils.translation import pgettext as pgettext_real, npgettext as npgettext_real + from jinja2.ext import InternationalizationExtension +from jinja2.utils import contextfunction, Markup from puente.utils import collapse_whitespace +@contextfunction +def pgettext(__context, context, message, **variables): + rv = pgettext_real(context, message) + if __context.eval_ctx.autoescape: + rv = Markup(rv) + return rv % variables + + +@contextfunction +def npgettext(__context, context, singular, plural, number, **variables): + variables.setdefault('num', number) + rv = npgettext_real(context, singular, plural, number) + if __context.eval_ctx.autoescape: + rv = Markup(rv) + return rv % variables + + class PuenteI18nExtension(InternationalizationExtension): """Provides whitespace collapsing trans behavior @@ -12,6 +32,11 @@ class PuenteI18nExtension(InternationalizationExtension): Jinja2 templates. 
""" + def __init__(self, environment): + super(PuenteI18nExtension, self).__init__(environment) + environment.globals['pgettext'] = pgettext + environment.globals['npgettext'] = npgettext + def _parse_block(self, parser, allow_pluralize): parse_block = InternationalizationExtension._parse_block ref, buffer = parse_block(self, parser, allow_pluralize) diff --git a/puente/management/commands/extract.py b/puente/management/commands/extract.py index c688034..de8a5c2 100644 --- a/puente/management/commands/extract.py +++ b/puente/management/commands/extract.py @@ -3,7 +3,7 @@ from optparse import make_option from django.core.management.base import BaseCommand -from puente.commands import extract_command +from puente.commands import DEFAULT_DOMAIN_VALUE, extract_command from puente.settings import get_setting @@ -11,6 +11,15 @@ class Command(BaseCommand): help = 'Extracts strings for translation.' option_list = BaseCommand.option_list + ( + make_option( + '--domain', '-d', default=DEFAULT_DOMAIN_VALUE, + dest='domain', + help=( + 'The domain of the message files. If "all" ' + 'everything will be extracted and combined into ' + '%s.pot. (default: %%default).' % get_setting('TEXT_DOMAIN') + ) + ), make_option( '--output-dir', '-o', default=os.path.join(get_setting('BASE_DIR'), 'locale', @@ -20,15 +29,17 @@ class Command(BaseCommand): 'The directory where extracted files will be placed. 
' '(Default: %default)' ) - ), + ) ) def handle(self, *args, **options): return extract_command( # Command line arguments + domain=options.get('domain'), outputdir=options.get('outputdir'), # From settings.py domain_methods=get_setting('DOMAIN_METHODS'), + standalone_domains=get_setting('STANDALONE_DOMAINS'), text_domain=get_setting('TEXT_DOMAIN'), keywords=get_setting('KEYWORDS'), comment_tags=get_setting('COMMENT_TAGS'), diff --git a/puente/management/commands/merge.py b/puente/management/commands/merge.py index 74de7d9..b34c2e4 100644 --- a/puente/management/commands/merge.py +++ b/puente/management/commands/merge.py @@ -38,7 +38,7 @@ class Command(BaseCommand): return merge_command( create=options.get('create'), base_dir=get_setting('BASE_DIR'), - domain_methods=get_setting('DOMAIN_METHODS'), + standalone_domains=get_setting('STANDALONE_DOMAINS'), languages=getattr(settings, 'LANGUAGES', []) ) diff --git a/puente/settings.py b/puente/settings.py index 55609ca..edf5321 100644 --- a/puente/settings.py +++ b/puente/settings.py @@ -7,6 +7,12 @@ TEXT_DOMAIN = 'django' # Keywords indicating gettext calls KEYWORDS = generate_keywords() +# By default, all the domains you speficy will be merged into one big django.po +# file. If you want to separate a domain from the main .po file, specify it in +# this list. Make sure to include TEXT_DOMAIN in this list, even if you have +# other .po files you're generating +STANDALONE_DOMAINS = [TEXT_DOMAIN] + # Prefixes that indicate a comment tag intended for localizers COMMENT_TAGS = ['L10n:', 'L10N:', 'l10n:', 'l10N:', 'Translators:']
pgettext for templates Templates have `_` (alias for gettext), `gettext` and `ngettext` available. These are handled by Jinja2 contextfunctions which call the underlying installed gettext/ngettext callables, get strings back and then wrap in a `Markup`. https://github.com/mitsuhiko/jinja2/blob/master/jinja2/ext.py#L135 Yay! That's great! However, there's no pgettext and no way that I can see to pass in a msgctxt. This issue covers figuring out what to do about that. At the moment, I'm leaning towards adding it as a global to our `PuenteI18nExtension` for the short-term. Long-term, we should submit a PR for this issue: https://github.com/mitsuhiko/jinja2/issues/441
mozilla/puente
diff --git a/tests/test_ext.py b/tests/test_ext.py index c6ebc92..aa912f3 100644 --- a/tests/test_ext.py +++ b/tests/test_ext.py @@ -107,6 +107,74 @@ class TestPuenteI18nExtension: ) assert render(tmpl) == '<b>multiple <i>bar</i></b>' + def test_pgettext(self): + tmpl = '{{ pgettext("context", "message") }}' + assert render(tmpl) == 'message' + + def test_pgettext_is_safe(self): + tmpl = '{{ pgettext("context", "<b>foo</b>") }}' + assert render(tmpl) == '<b>foo</b>' + + def test_pgettext_variable_value_notsafe(self): + tmpl = '{{ pgettext("context", "<b>%(foo)s</b>", foo="<i>bar</i>") }}' + assert render(tmpl) == '<b>&lt;i&gt;bar&lt;/i&gt;</b>' + + def test_pgettext_variable_value_marked_safe_is_safe(self): + tmpl = '{{ pgettext("context", "<b>%(foo)s</b>", foo="<i>bar</i>"|safe) }}' + assert render(tmpl) == '<b><i>bar</i></b>' + + def test_pgettext_variable_values_autoescape_false(self): + tmpl = ( + '{% autoescape False %}' + '{{ pgettext("context", "<b>%(foo)s</b>", foo="<i>bar</i>") }}' + '{% endautoescape %}' + ) + assert render(tmpl) == '<b><i>bar</i></b>' + + def test_npgettext(self): + tmpl = '{{ npgettext("context", "sing", "plur", 1) }}' + assert render(tmpl) == "sing" + tmpl = '{{ npgettext("context", "sing", "plur", 2) }}' + assert render(tmpl) == "plur" + + def test_npgettext_is_safe(self): + tmpl = '{{ npgettext("context", "<b>sing</b>", "<b>plur</b>", 1) }}' + assert render(tmpl) == "<b>sing</b>" + tmpl = '{{ npgettext("context", "<b>sing</b>", "<b>plur</b>", 2) }}' + assert render(tmpl) == "<b>plur</b>" + + def test_npgettext_variable_num(self): + tmpl = '{{ npgettext("context", "<b>sing %(num)s</b>", "<b>plur %(num)s</b>", 1) }}' + assert render(tmpl) == "<b>sing 1</b>" + tmpl = '{{ npgettext("context", "<b>sing %(num)s</b>", "<b>plur %(num)s</b>", 2) }}' + assert render(tmpl) == "<b>plur 2</b>" + + def test_npgettext_variable_values_notsafe(self): + tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 1, foo="<i>bar</i>") 
}}' + assert render(tmpl) == '<b>sing &lt;i&gt;bar&lt;/i&gt;</b>' + tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 2, foo="<i>bar</i>") }}' + assert render(tmpl) == '<b>plur &lt;i&gt;bar&lt;/i&gt;</b>' + + def test_npgettext_variable_value_marked_safe_is_safe(self): + tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 1, foo="<i>bar</i>"|safe) }}' + assert render(tmpl) == '<b>sing <i>bar</i></b>' + tmpl = '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 2, foo="<i>bar</i>"|safe) }}' + assert render(tmpl) == '<b>plur <i>bar</i></b>' + + def test_npgettext_variable_values_autoescape_false(self): + tmpl = ( + '{% autoescape False %}' + '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 1, foo="<i>bar</i>") }}' + '{% endautoescape %}' + ) + assert render(tmpl) == '<b>sing <i>bar</i></b>' + tmpl = ( + '{% autoescape False %}' + '{{ npgettext("context", "<b>sing %(foo)s</b>", "<b>plur %(foo)s</b>", 2, foo="<i>bar</i>") }}' + '{% endautoescape %}' + ) + assert render(tmpl) == '<b>plur <i>bar</i></b>' + def test_trans(self): tmpl = '<div>{% trans %}puente rules!{% endtrans %}</div>' assert render(tmpl) == '<div>puente rules!</div>' diff --git a/tests/test_extract.py b/tests/test_extract.py index bc91f54..0a5a4c5 100644 --- a/tests/test_extract.py +++ b/tests/test_extract.py @@ -41,6 +41,7 @@ class TestExtractCommand: # Extract extract_command( + domain='all', outputdir=str(tmpdir), domain_methods={ 'django': [ @@ -48,6 +49,7 @@ class TestExtractCommand: ('*.html', 'jinja2'), ] }, + standalone_domains=puente_settings.STANDALONE_DOMAINS, text_domain=puente_settings.TEXT_DOMAIN, keywords=puente_settings.KEYWORDS, comment_tags=puente_settings.COMMENT_TAGS, @@ -81,6 +83,7 @@ class TestExtractCommand: def test_header(self, tmpdir): # Extract extract_command( + domain='all', outputdir=str(tmpdir), domain_methods={ 'django': [ @@ -88,6 +91,7 @@ class TestExtractCommand: ('*.html', 
'jinja2'), ] }, + standalone_domains=puente_settings.STANDALONE_DOMAINS, text_domain=puente_settings.TEXT_DOMAIN, keywords=puente_settings.KEYWORDS, comment_tags=puente_settings.COMMENT_TAGS, @@ -139,6 +143,7 @@ class TestExtractCommand: # Extract extract_command( + domain='all', outputdir=str(tmpdir), domain_methods={ 'django': [ @@ -146,6 +151,7 @@ class TestExtractCommand: ('*.html', 'jinja2'), ] }, + standalone_domains=puente_settings.STANDALONE_DOMAINS, text_domain=puente_settings.TEXT_DOMAIN, keywords=puente_settings.KEYWORDS, comment_tags=puente_settings.COMMENT_TAGS, @@ -187,6 +193,7 @@ class TestExtractCommand: # Extract extract_command( + domain='all', outputdir=str(tmpdir), domain_methods={ 'django': [ @@ -194,6 +201,7 @@ class TestExtractCommand: ('*.html', 'jinja2'), ] }, + standalone_domains=puente_settings.STANDALONE_DOMAINS, text_domain=puente_settings.TEXT_DOMAIN, keywords=puente_settings.KEYWORDS, comment_tags=puente_settings.COMMENT_TAGS, @@ -237,6 +245,7 @@ class TestExtractCommand: # Extract extract_command( + domain='all', outputdir=str(tmpdir), domain_methods={ 'django': [ @@ -244,6 +253,7 @@ class TestExtractCommand: ('*.html', 'jinja2'), ] }, + standalone_domains=puente_settings.STANDALONE_DOMAINS, text_domain=puente_settings.TEXT_DOMAIN, keywords=puente_settings.KEYWORDS, comment_tags=puente_settings.COMMENT_TAGS, diff --git a/tests/test_merge.py b/tests/test_merge.py index 29e1456..0278ba1 100644 --- a/tests/test_merge.py +++ b/tests/test_merge.py @@ -8,6 +8,7 @@ from django.core.management import CommandError from django.test import TestCase from puente.commands import merge_command +from puente.settings import get_setting class TestManageMerge(TestCase): @@ -66,12 +67,7 @@ class TestMergecommand: merge_command( create=True, base_dir=str(tmpdir), - domain_methods={ - 'django': [ - ('*.py', 'python'), - ('*.html', 'jinja2'), - ] - }, + standalone_domains=get_setting('STANDALONE_DOMAINS'), languages=['de', 'en-US', 'fr'] ) @@ -84,11 +80,6 
@@ class TestMergecommand: merge_command( create=True, base_dir=str(tmpdir), - domain_methods={ - 'django': [ - ('*.py', 'python'), - ('*.html', 'jinja2'), - ] - }, + standalone_domains=get_setting('STANDALONE_DOMAINS'), languages=['de', 'en-US', 'fr'] )
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 9 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-pythonpath", "pytest-django" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 asgiref==3.8.1 attrs==25.3.0 babel==2.17.0 backports.tarfile==1.2.0 build==1.2.2.post1 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 check-manifest==0.50 colorama==0.4.6 cryptography==44.0.2 distlib==0.3.9 Django==4.2.20 django-jinja==2.11.0 docutils==0.21.2 filelock==3.18.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 -e git+https://github.com/mozilla/puente.git@4208bed33250cd3c9d8dacdc73dbc274ce21c85d#egg=puente py==1.11.0 pycparser==2.22 Pygments==2.19.1 pyproject-api==1.9.0 pyproject_hooks==1.2.0 pytest==6.2.5 pytest-django==4.5.2 pytest-pythonpath==0.7.4 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 sqlparse==0.5.3 swebench_matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==2.2.1 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3 zipp==3.21.0
name: puente channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - asgiref==3.8.1 - attrs==25.3.0 - babel==2.17.0 - backports-tarfile==1.2.0 - build==1.2.2.post1 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - check-manifest==0.50 - colorama==0.4.6 - cryptography==44.0.2 - distlib==0.3.9 - django==4.2.20 - django-jinja==2.11.0 - docutils==0.21.2 - filelock==3.18.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - py==1.11.0 - pycparser==2.22 - pygments==2.19.1 - pyproject-api==1.9.0 - pyproject-hooks==1.2.0 - pytest==6.2.5 - pytest-django==4.5.2 - pytest-pythonpath==0.7.4 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - sqlparse==0.5.3 - 
swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==2.2.1 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/puente
[ "tests/test_merge.py::TestMergecommand::test_missing_pot_file" ]
[ "tests/test_extract.py::TestManageExtract::test_help", "tests/test_merge.py::TestManageMerge::test_help", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext_is_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext_variable_values_notsafe", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext_variable_values_autoescape_false", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext_variable_values_marked_safe_are_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext_format_notsafe", "tests/test_ext.py::TestPuenteI18nExtension::test_gettext_format_autoescape_false", "tests/test_ext.py::TestPuenteI18nExtension::test_ngettext", "tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_is_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_num", "tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_values_notsafe", "tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_value_marked_safe_is_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_ngettext_variable_values_autoescape_false", "tests/test_ext.py::TestPuenteI18nExtension::test_pgettext", "tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_is_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_variable_value_notsafe", "tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_variable_value_marked_safe_is_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_pgettext_variable_values_autoescape_false", "tests/test_ext.py::TestPuenteI18nExtension::test_npgettext", "tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_is_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_num", "tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_values_notsafe", "tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_value_marked_safe_is_safe", 
"tests/test_ext.py::TestPuenteI18nExtension::test_npgettext_variable_values_autoescape_false", "tests/test_ext.py::TestPuenteI18nExtension::test_trans", "tests/test_ext.py::TestPuenteI18nExtension::test_trans_whitespace", "tests/test_ext.py::TestPuenteI18nExtension::test_trans_plural", "tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation", "tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation_with_autoescape_off", "tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation_and_safe", "tests/test_ext.py::TestPuenteI18nExtension::test_trans_interpolation_and_safe_with_autoescape_off", "tests/test_extract.py::TestExtractCommand::test_basic_extraction", "tests/test_extract.py::TestExtractCommand::test_header", "tests/test_extract.py::TestExtractCommand::test_whitespace_collapsing", "tests/test_extract.py::TestExtractCommand::test_context", "tests/test_extract.py::TestExtractCommand::test_plurals", "tests/test_merge.py::TestMergecommand::test_basic" ]
[]
[]
BSD 3-Clause "New" or "Revised" License
306
sympy__sympy-10166
cbc73f198998c4d53b228511ed3a821fa4e1bb8f
2015-11-21 18:12:53
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
asmeurer: All the Travis builds are erroring with "No output has been received in the last 10 minutes, this potentially indicates a stalled build or something wrong with the build itself." I'm guessing this means there is a nontrivial performance hit here. smichr: > nontrivial performance hit :-) not really, just a garden-variety infinite loop that I have to watch out for
diff --git a/data/TeXmacs/bin/tm_sympy b/data/TeXmacs/bin/tm_sympy index 659818d21b..a801156f3f 100755 --- a/data/TeXmacs/bin/tm_sympy +++ b/data/TeXmacs/bin/tm_sympy @@ -69,7 +69,7 @@ _greek = 'alpha beta gamma delta epsilon zeta eta ' \ 'sigma tau upsilon phi chi psi omega' for _symbol in _greek.split(' '): - exec("%s = Symbol('%s')" % (_symbol, _symbol)) + exec "%s = Symbol('%s')" % (_symbol, _symbol) del _symbol """ diff --git a/sympy/core/logic.py b/sympy/core/logic.py index d12bee1562..077f277c76 100644 --- a/sympy/core/logic.py +++ b/sympy/core/logic.py @@ -64,23 +64,11 @@ def fuzzy_bool(x): """Return True, False or None according to x. Whereas bool(x) returns True or False, fuzzy_bool allows - for the None value and non-false values (which become None), too. - - Examples - ======== - - >>> from sympy.core.logic import fuzzy_bool - >>> from sympy.abc import x - >>> fuzzy_bool(x), fuzzy_bool(None) - (None, None) - >>> bool(x), bool(None) - (True, False) - + for the None value. """ if x is None: return None - if x in (True, False): - return bool(x) + return bool(x) def fuzzy_and(args): diff --git a/sympy/core/mul.py b/sympy/core/mul.py index 2a0e6090e4..30a51adb8e 100644 --- a/sympy/core/mul.py +++ b/sympy/core/mul.py @@ -368,17 +368,17 @@ def flatten(cls, seq): # gather exponents of common bases... 
def _gather(c_powers): - new_c_powers = [] common_b = {} # b:e for b, e in c_powers: co = e.as_coeff_Mul() - common_b.setdefault(b, {}).setdefault(co[1], []).append(co[0]) + common_b.setdefault(b, {}).setdefault( + co[1], []).append(co[0]) for b, d in common_b.items(): for di, li in d.items(): d[di] = Add(*li) + new_c_powers = [] for b, e in common_b.items(): - for t, c in e.items(): - new_c_powers.append((b, c*t)) + new_c_powers.extend([(b, c*t) for t, c in e.items()]) return new_c_powers # in c_powers @@ -402,14 +402,45 @@ def _gather(c_powers): # 0 1 # x -> 1 x -> x - for b, e in c_powers: - if e is S.One: - if b.is_Number: - coeff *= b - else: - c_part.append(b) - elif e is not S.Zero: - c_part.append(Pow(b, e)) + + # this should only need to run twice; if it fails because + # it needs to be run more times, perhaps this should be + # changed to a "while True" loop -- the only reason it + # isn't such now is to allow a less-than-perfect result to + # be obtained rather than raising an error or entering an + # infinite loop + for i in range(2): + new_c_powers = [] + changed = False + for b, e in c_powers: + if e.is_zero: + continue + if e is S.One: + if b.is_Number: + coeff *= b + continue + p = b + if e is not S.One: + p = Pow(b, e) + # check to make sure that the base doesn't change + # after exponentiation; to allow for unevaluated + # Pow, we only do so if b is not already a Pow + if p.is_Pow and not b.is_Pow: + bi = b + b, e = p.as_base_exp() + if b != bi: + changed = True + c_part.append(p) + new_c_powers.append((b, e)) + # there might have been a change, but unless the base + # matches some other base, there is nothing to do + if changed and len(set( + b for b, e in new_c_powers)) != len(new_c_powers): + # start over again + c_part = [] + c_powers = _gather(new_c_powers) + else: + break # x x x # 2 * 3 -> 6 diff --git a/sympy/polys/polyutils.py b/sympy/polys/polyutils.py index 0e74704deb..28f9876313 100644 --- a/sympy/polys/polyutils.py +++ 
b/sympy/polys/polyutils.py @@ -262,7 +262,7 @@ def _is_coeff(factor): else: base, exp = decompose_power_rat(factor) - elements[base] = exp + elements[base] = elements.setdefault(base, 0) + exp gens.add(base) terms.append((coeff, elements)) diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index 3b0ba18a62..684df72095 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -1063,8 +1063,8 @@ def _eval_imageset(self, f): else: return imageset(f, Interval(self.start, sing[0], self.left_open, True)) + \ - Union(*[imageset(f, Interval(sing[i], sing[i + 1], True, True)) - for i in range(0, len(sing) - 1)]) + \ + Union(*[imageset(f, Interval(sing[i], sing[i + 1]), True, True) + for i in range(1, len(sing) - 1)]) + \ imageset(f, Interval(sing[-1], self.end, True, self.right_open)) @property @@ -1396,7 +1396,7 @@ def flatten(arg): args = flatten(args) if len(args) == 0: - return S.EmptySet + raise TypeError("Intersection expected at least one argument") # args can't be ordered for Partition see issue #9608 if 'Partition' not in [type(a).__name__ for a in args]: @@ -1435,72 +1435,6 @@ def __iter__(self): raise ValueError("None of the constituent sets are iterable") - @staticmethod - def _handle_finite_sets(args): - from sympy.core.logic import fuzzy_and, fuzzy_bool - from sympy.core.compatibility import zip_longest - - new_args = [] - fs_args = [] - for s in args: - if s.is_FiniteSet: - fs_args.append(s) - else: - new_args.append(s) - if not fs_args: - return - s = fs_args[0] - fs_args = fs_args[1:] - res = [] - unk = [] - for x in s: - c = fuzzy_and(fuzzy_bool(o.contains(x)) - for o in fs_args + new_args) - if c: - res.append(x) - elif c is None: - unk.append(x) - else: - pass # drop arg - res = FiniteSet( - *res, evaluate=False) if res else S.EmptySet - if unk: - symbolic_s_list = [x for x in s if x.has(Symbol)] - non_symbolic_s = s - FiniteSet( - *symbolic_s_list, evaluate=False) - while fs_args: - v = fs_args.pop() - if all(i == j for i, j in zip_longest( - 
symbolic_s_list, - (x for x in v if x.has(Symbol)))): - # all the symbolic elements of `v` are the same - # as in `s` so remove the non-symbol containing - # expressions from `unk`, since they cannot be - # contained - for x in non_symbolic_s: - if x in unk: - unk.remove(x) - else: - # if only a subset of elements in `s` are - # contained in `v` then remove them from `v` - # and add this as a new arg - contained = [x for x in symbolic_s_list - if v.contains(x) == True] - if contained != symbolic_s_list: - new_args.append( - v - FiniteSet( - *contained, evaluate=False)) - else: - pass # for coverage - - other_sets = Intersection(*new_args) - if not other_sets: - return S.EmptySet # b/c we use evaluate=False below - res += Intersection( - FiniteSet(*unk), - other_sets, evaluate=False) - return res - @staticmethod def reduce(args): """ @@ -1518,10 +1452,52 @@ def reduce(args): if any(s.is_EmptySet for s in args): return S.EmptySet - # Handle Finite sets - rv = Intersection._handle_finite_sets(args) - if rv is not None: - return rv + for s in args: + if s.is_FiniteSet: + # see which elements of the FiniteSet occur within + # all other sets in the intersection + other_args = [a for a in args if a != s] + res = FiniteSet(*[x for x in s if all( + o.contains(x) == True for o in other_args)]) + unk = [x for x in s if any( + o.contains(x) not in (True, False) for o in other_args)] + if unk: + new_other = [] + del_other = [] + for ival, val in enumerate(other_args): + if val.is_FiniteSet: + # collect expressions having symbols + # from `val` and `s` + symbol_in_val = [x for x in val if x.has(Symbol)] + symbol_in_s = [x for x in s if x.has(Symbol)] + del_other.append(ival) + # if expression with symbols are same in `s` and `val` + # then remove the non-symbol containing expressions + # from `unk`, since they can not be contained + if symbol_in_s == symbol_in_val: + syms = FiniteSet(*symbol_in_s, evaluate=False) + non_symbol_in_s = s - syms + s = syms + for x in 
non_symbol_in_s: + if x in unk: + unk.remove(x) + else: + fin = FiniteSet(* + [x for x in symbol_in_s + if val.contains(x) == True], + evaluate=False) + if s != fin: + val = val - fin + new_other.append(val) + + for i in reversed(del_other): + other_args.pop(i) + other_sets = Intersection(*(other_args + new_other)) + if other_sets.is_EmptySet: + return EmptySet() + res += Intersection( + s.func(*unk), other_sets, evaluate=False) + return res # If any of the sets are unions, return a Union of Intersections for s in args: @@ -1813,7 +1789,7 @@ def _intersect(self, other): """ if isinstance(other, self.__class__): return self.__class__(*(self._elements & other._elements)) - return self.__class__(*[el for el in self if el in other]) + return self.__class__(el for el in self if el in other) def _complement(self, other): if isinstance(other, Interval):
factor gives an invalid expression This was reported on the mailing list a while back ``` In [29]: h = 2*x*(-2*x + Abs(x))*(x**2 - 1)/Abs(x**2 - 1) + (x/Abs(x) - 2)*Abs(x**2 - 1) In [30]: print(factor(h)) (x - 1)*(x**4 - 6*x**3*Abs(x) + x**3 - 6*x**2*Abs(x) + x**2 + 2*x*Abs(x) - x + 2*Abs(x))/(Abs(x)*Abs(x**2 - 1)) In [31]: h.subs(x, 2) Out[31]: -11 In [32]: factor(h).subs(x, 2) Out[32]: -53/3 ```
sympy/sympy
diff --git a/sympy/core/tests/test_expr.py b/sympy/core/tests/test_expr.py index 49b3d5e614..5313766db9 100644 --- a/sympy/core/tests/test_expr.py +++ b/sympy/core/tests/test_expr.py @@ -1686,7 +1686,13 @@ def test_issue_6325(): e.diff(t, 2) == ans assert diff(e, t, 2, simplify=False) != ans + def test_issue_7426(): f1 = a % c f2 = x % z assert f1.equals(f2) == False + + +def test_issue_10161(): + x = symbols('x', real=True) + assert x*abs(x)*abs(x) == x**3 diff --git a/sympy/polys/tests/test_polyutils.py b/sympy/polys/tests/test_polyutils.py index 2ea39b7b7a..7b27d42f14 100644 --- a/sympy/polys/tests/test_polyutils.py +++ b/sympy/polys/tests/test_polyutils.py @@ -1,6 +1,7 @@ """Tests for useful utilities for higher level polynomial classes. """ -from sympy import S, Integer, sin, cos, sqrt, symbols, pi, Eq, Integral, exp +from sympy import (S, Integer, sin, cos, sqrt, symbols, pi, + Eq, Integral, exp, Mul) from sympy.utilities.pytest import raises from sympy.polys.polyutils import ( @@ -269,6 +270,8 @@ def test__parallel_dict_from_expr_no_gens(): assert parallel_dict_from_expr([x*y, 2*z, Integer(3)]) == \ ([{(1, 1, 0): Integer( 1)}, {(0, 0, 1): Integer(2)}, {(0, 0, 0): Integer(3)}], (x, y, z)) + assert parallel_dict_from_expr((Mul(x, x**2, evaluate=False),)) == \ + ([{(3,): 1}], (x,)) def test_parallel_dict_from_expr(): diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py index cc9065d56c..03386b8984 100644 --- a/sympy/sets/tests/test_sets.py +++ b/sympy/sets/tests/test_sets.py @@ -222,7 +222,6 @@ def test_intersect(): assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \ Union(Interval(0, 1), Interval(2, 2)) - assert FiniteSet(1, 2)._intersect((1, 2, 3)) == FiniteSet(1, 2) assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x) assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \ FiniteSet('ham') @@ -920,7 +919,7 @@ def test_issue_Symbol_inter(): assert Intersection(FiniteSet(m, n, x), FiniteSet(m, 
z), r) == \ Intersection(r, FiniteSet(m, z), FiniteSet(n, x)) assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \ - Intersection(r, FiniteSet(3, m, n), evaluate=False) + Intersection(r, FiniteSet(x), FiniteSet(3, m, n), evaluate=False) assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \ Union(FiniteSet(3), Intersection(r, FiniteSet(m, n))) assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \ @@ -929,10 +928,3 @@ def test_issue_Symbol_inter(): Intersection(r, FiniteSet(sin(x), cos(x))) assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \ Intersection(r, FiniteSet(x**2, sin(x))) - - -def test_issue_10113(): - f = x**2/(x**2 - 4) - assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True)) - assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0) - assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(S(9)/5, oo))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 5 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 -e git+https://github.com/sympy/sympy.git@cbc73f198998c4d53b228511ed3a821fa4e1bb8f#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/core/tests/test_expr.py::test_issue_10161", "sympy/polys/tests/test_polyutils.py::test__parallel_dict_from_expr_no_gens", "sympy/sets/tests/test_sets.py::test_issue_Symbol_inter" ]
[]
[ "sympy/core/tests/test_expr.py::test_basic", "sympy/core/tests/test_expr.py::test_ibasic", "sympy/core/tests/test_expr.py::test_relational", "sympy/core/tests/test_expr.py::test_relational_assumptions", "sympy/core/tests/test_expr.py::test_relational_noncommutative", "sympy/core/tests/test_expr.py::test_basic_nostr", "sympy/core/tests/test_expr.py::test_series_expansion_for_uniform_order", "sympy/core/tests/test_expr.py::test_leadterm", "sympy/core/tests/test_expr.py::test_as_leading_term", "sympy/core/tests/test_expr.py::test_leadterm2", "sympy/core/tests/test_expr.py::test_leadterm3", "sympy/core/tests/test_expr.py::test_as_leading_term2", "sympy/core/tests/test_expr.py::test_as_leading_term3", "sympy/core/tests/test_expr.py::test_as_leading_term4", "sympy/core/tests/test_expr.py::test_as_leading_term_stub", "sympy/core/tests/test_expr.py::test_atoms", "sympy/core/tests/test_expr.py::test_is_polynomial", "sympy/core/tests/test_expr.py::test_is_rational_function", "sympy/core/tests/test_expr.py::test_is_algebraic_expr", "sympy/core/tests/test_expr.py::test_SAGE1", "sympy/core/tests/test_expr.py::test_SAGE2", "sympy/core/tests/test_expr.py::test_SAGE3", "sympy/core/tests/test_expr.py::test_len", "sympy/core/tests/test_expr.py::test_doit", "sympy/core/tests/test_expr.py::test_attribute_error", "sympy/core/tests/test_expr.py::test_args", "sympy/core/tests/test_expr.py::test_noncommutative_expand_issue_3757", "sympy/core/tests/test_expr.py::test_as_numer_denom", "sympy/core/tests/test_expr.py::test_as_independent", "sympy/core/tests/test_expr.py::test_replace", "sympy/core/tests/test_expr.py::test_find", "sympy/core/tests/test_expr.py::test_count", "sympy/core/tests/test_expr.py::test_has_basics", "sympy/core/tests/test_expr.py::test_has_multiple", "sympy/core/tests/test_expr.py::test_has_piecewise", "sympy/core/tests/test_expr.py::test_has_iterative", "sympy/core/tests/test_expr.py::test_has_integrals", "sympy/core/tests/test_expr.py::test_has_tuple", 
"sympy/core/tests/test_expr.py::test_has_units", "sympy/core/tests/test_expr.py::test_has_polys", "sympy/core/tests/test_expr.py::test_has_physics", "sympy/core/tests/test_expr.py::test_as_poly_as_expr", "sympy/core/tests/test_expr.py::test_nonzero", "sympy/core/tests/test_expr.py::test_is_number", "sympy/core/tests/test_expr.py::test_as_coeff_add", "sympy/core/tests/test_expr.py::test_as_coeff_mul", "sympy/core/tests/test_expr.py::test_as_coeff_exponent", "sympy/core/tests/test_expr.py::test_extractions", "sympy/core/tests/test_expr.py::test_nan_extractions", "sympy/core/tests/test_expr.py::test_coeff", "sympy/core/tests/test_expr.py::test_coeff2", "sympy/core/tests/test_expr.py::test_coeff2_0", "sympy/core/tests/test_expr.py::test_coeff_expand", "sympy/core/tests/test_expr.py::test_integrate", "sympy/core/tests/test_expr.py::test_as_base_exp", "sympy/core/tests/test_expr.py::test_issue_4963", "sympy/core/tests/test_expr.py::test_action_verbs", "sympy/core/tests/test_expr.py::test_as_powers_dict", "sympy/core/tests/test_expr.py::test_as_coefficients_dict", "sympy/core/tests/test_expr.py::test_args_cnc", "sympy/core/tests/test_expr.py::test_new_rawargs", "sympy/core/tests/test_expr.py::test_issue_5226", "sympy/core/tests/test_expr.py::test_free_symbols", "sympy/core/tests/test_expr.py::test_issue_5300", "sympy/core/tests/test_expr.py::test_as_coeff_Mul", "sympy/core/tests/test_expr.py::test_as_coeff_Add", "sympy/core/tests/test_expr.py::test_expr_sorting", "sympy/core/tests/test_expr.py::test_as_ordered_factors", "sympy/core/tests/test_expr.py::test_as_ordered_terms", "sympy/core/tests/test_expr.py::test_sort_key_atomic_expr", "sympy/core/tests/test_expr.py::test_issue_4199", "sympy/core/tests/test_expr.py::test_eval_interval_zoo", "sympy/core/tests/test_expr.py::test_primitive", "sympy/core/tests/test_expr.py::test_issue_5843", "sympy/core/tests/test_expr.py::test_is_constant", "sympy/core/tests/test_expr.py::test_equals", 
"sympy/core/tests/test_expr.py::test_random", "sympy/core/tests/test_expr.py::test_round", "sympy/core/tests/test_expr.py::test_round_exception_nostr", "sympy/core/tests/test_expr.py::test_extract_branch_factor", "sympy/core/tests/test_expr.py::test_identity_removal", "sympy/core/tests/test_expr.py::test_float_0", "sympy/core/tests/test_expr.py::test_issue_6325", "sympy/core/tests/test_expr.py::test_issue_7426", "sympy/polys/tests/test_polyutils.py::test__nsort", "sympy/polys/tests/test_polyutils.py::test__sort_gens", "sympy/polys/tests/test_polyutils.py::test__unify_gens", "sympy/polys/tests/test_polyutils.py::test__analyze_gens", "sympy/polys/tests/test_polyutils.py::test__sort_factors", "sympy/polys/tests/test_polyutils.py::test__dict_from_expr_if_gens", "sympy/polys/tests/test_polyutils.py::test__dict_from_expr_no_gens", "sympy/polys/tests/test_polyutils.py::test__parallel_dict_from_expr_if_gens", "sympy/polys/tests/test_polyutils.py::test_parallel_dict_from_expr", "sympy/polys/tests/test_polyutils.py::test_dict_from_expr", "sympy/sets/tests/test_sets.py::test_interval_arguments", "sympy/sets/tests/test_sets.py::test_interval_symbolic_end_points", "sympy/sets/tests/test_sets.py::test_union", "sympy/sets/tests/test_sets.py::test_difference", "sympy/sets/tests/test_sets.py::test_Complement", "sympy/sets/tests/test_sets.py::test_complement", "sympy/sets/tests/test_sets.py::test_intersect", "sympy/sets/tests/test_sets.py::test_intersection", "sympy/sets/tests/test_sets.py::test_issue_9623", "sympy/sets/tests/test_sets.py::test_is_disjoint", "sympy/sets/tests/test_sets.py::test_ProductSet_of_single_arg_is_arg", "sympy/sets/tests/test_sets.py::test_interval_subs", "sympy/sets/tests/test_sets.py::test_interval_to_mpi", "sympy/sets/tests/test_sets.py::test_measure", "sympy/sets/tests/test_sets.py::test_is_subset", "sympy/sets/tests/test_sets.py::test_is_proper_subset", "sympy/sets/tests/test_sets.py::test_is_superset", 
"sympy/sets/tests/test_sets.py::test_is_proper_superset", "sympy/sets/tests/test_sets.py::test_contains", "sympy/sets/tests/test_sets.py::test_interval_symbolic", "sympy/sets/tests/test_sets.py::test_union_contains", "sympy/sets/tests/test_sets.py::test_is_number", "sympy/sets/tests/test_sets.py::test_Interval_is_left_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_is_right_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_as_relational", "sympy/sets/tests/test_sets.py::test_Finite_as_relational", "sympy/sets/tests/test_sets.py::test_Union_as_relational", "sympy/sets/tests/test_sets.py::test_Intersection_as_relational", "sympy/sets/tests/test_sets.py::test_EmptySet", "sympy/sets/tests/test_sets.py::test_finite_basic", "sympy/sets/tests/test_sets.py::test_powerset", "sympy/sets/tests/test_sets.py::test_product_basic", "sympy/sets/tests/test_sets.py::test_real", "sympy/sets/tests/test_sets.py::test_supinf", "sympy/sets/tests/test_sets.py::test_universalset", "sympy/sets/tests/test_sets.py::test_Union_of_ProductSets_shares", "sympy/sets/tests/test_sets.py::test_Interval_free_symbols", "sympy/sets/tests/test_sets.py::test_image_interval", "sympy/sets/tests/test_sets.py::test_image_piecewise", "sympy/sets/tests/test_sets.py::test_image_FiniteSet", "sympy/sets/tests/test_sets.py::test_image_Union", "sympy/sets/tests/test_sets.py::test_image_EmptySet", "sympy/sets/tests/test_sets.py::test_issue_5724_7680", "sympy/sets/tests/test_sets.py::test_boundary", "sympy/sets/tests/test_sets.py::test_boundary_Union", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet_line", "sympy/sets/tests/test_sets.py::test_is_open", "sympy/sets/tests/test_sets.py::test_is_closed", "sympy/sets/tests/test_sets.py::test_closure", "sympy/sets/tests/test_sets.py::test_interior", "sympy/sets/tests/test_sets.py::test_issue_7841", "sympy/sets/tests/test_sets.py::test_Eq", 
"sympy/sets/tests/test_sets.py::test_SymmetricDifference", "sympy/sets/tests/test_sets.py::test_issue_9536", "sympy/sets/tests/test_sets.py::test_issue_9637", "sympy/sets/tests/test_sets.py::test_issue_9808", "sympy/sets/tests/test_sets.py::test_issue_9956" ]
[]
BSD
307
twisted__tubes-32
188aad3c9caf5d07e894d1fda6fb5f41b8bfc41a
2015-11-22 00:11:15
188aad3c9caf5d07e894d1fda6fb5f41b8bfc41a
diff --git a/.travis.yml b/.travis.yml index 04f9ff1..6944375 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,6 @@ env: - secure: "CvFj8Df5OiDRrW7EsTGhkltdmNlYerx9hH/tSKxiNFVDBUUFaTN7rUr7kWcOKchzerGwk7zjZ4SRXyoSCs+Srht6GZxWHkNROwKpp5Xvf5clbLXbp7GO1X/L5rLgrXpGwtkhgNuHx0X2IUCDHUQAUSumPgZcNFu3emgVxEqabN0=" matrix: - TOX_ENV=lint - - TOX_ENV=py26 - TOX_ENV=py27 - TOX_ENV=pypy - TOX_ENV=docs diff --git a/tubes/fan.py b/tubes/fan.py index 6322c99..67ad05f 100644 --- a/tubes/fan.py +++ b/tubes/fan.py @@ -355,7 +355,7 @@ class _OutDrain(object): self._pause = None if p is not None: p.unpause() - self.fount = fount + beginFlowingFrom(self, fount) def receive(self, item): diff --git a/tubes/routing.py b/tubes/routing.py index 7682fe5..725d868 100644 --- a/tubes/routing.py +++ b/tubes/routing.py @@ -1,4 +1,4 @@ -# -*- test-case-name: tubes.test.test_fan -*- +# -*- test-case-name: tubes.test.test_routing -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. @@ -8,87 +8,97 @@ an appropriate output, stripping the addressing information off. Use like so:: - from tubes.routing import Router, Routed, to + from tubes.tube import receiver, series + from tubes.routing import Router, to - aRouter = Router(int) + aRouter = Router() + evens = aRouter.newRoute() + odds = aRouter.newRoute() - evens, evenFount = aRouter.newRoute() - odds, oddFount = aRouter.newRoute() + @receiver() + def evenOdd(item): + if (item % 2) == 0: + yield to(evens, item) + else: + yield to(odds, item) - @tube - class EvenOdd(object): - outputType = Routed(int) - def received(self, item): - if (item % 2) == 0: - yield to(evens, item) - else: - yield to(odds, item) + numbers.flowTo(series(evenOdd, aRouter.drain)) - numbers.flowTo(aRouter) +Assuming C{numbers} is a fount of counting integers, this creates two founts: +C{evens} and C{odds}, whose outputs are even and odd integers, respectively. 
+Note that C{evenOdd} also uses C{evens} and C{odds} as I{addresses}; the first +argument to L{to} says I{where} the value will go. -This creates a fount in evenFount and oddFount, which each have an outputType -of "int". - -Why do this rather than just having C{EvenOdd} just call methods directly based +Why do this rather than just having C{evenOdd} just call methods directly based on whether a number is even or odd? By using a L{Router}, flow control relationships are automatically preserved by -the same mechanism that tubes usually use. The distinct drains of evenFount -and oddFount can both be independently paused, and the pause state will be +the same mechanism that tubes usually use. The distinct drains of C{evens} and +C{odds} can both independently pause their founts, and the pause state will be propagated to the "numbers" fount. If you want to send on outputs to multiple drains which may have complex flow-control interrelationships, you can't do that by calling the C{receive} method directly since any one of those methods -might reentrantly pause you. +might reentrantly pause its fount. """ -from .tube import tube, receiver +from zope.interface import implementer + +from .tube import receiver, series +from .itube import IDrain from .fan import Out +from .kit import beginFlowingFrom if 0: - from zope.interface.interfaces import IInterface - IInterface + from zope.interface.interfaces import ISpecification + ISpecification +__all__ = [ + "Router", + "Routed", + "to", +] class Routed(object): """ - A L{Routed} is an interface describing another interface that has been - wrapped in a C{to}. As such, it is an incomplete implementation of - L{IInterface}. + A L{Routed} is a specification describing another specification that has + been wrapped in a C{to}. As such, it is an incomplete implementation of + L{ISpecification}. """ - def __init__(self, interface=None): + def __init__(self, specification=None): """ - Derive a L{Routed} version of C{interface}. 
+ Derive a L{Routed} version of C{specification}. - @param interface: the interface that will be provided by the C{what} - attribute of providers of this interface. - @type interface: L{IInterface} + @param specification: the specification that will be provided by the + C{what} attribute of providers of this specification. + @type specification: L{ISpecification} """ - self.interface = interface + self.specification = specification def isOrExtends(self, other): """ Is this L{Routed} substitutable for the given specification? - @param other: Another L{Routed} or interface. - @type other: L{IInterface} + @param other: Another L{Routed} or specification. + @type other: L{ISpecification} @return: L{True} if so, L{False} if not. """ if not isinstance(other, Routed): return False - if self.interface is None or other.interface is None: + if self.specification is None or other.specification is None: return True - return self.interface.isOrExtends(other.interface) + return self.specification.isOrExtends(other.specification) def providedBy(self, instance): """ Is this L{Routed} provided by a particular value? - @param instance: an object which may or may not provide this interface. + @param instance: an object which may or may not provide this + specification. @type instance: L{object} @return: L{True} if so, L{False} if not. @@ -96,9 +106,27 @@ class Routed(object): """ if not isinstance(instance, _To): return False - if self.interface is None: + if self.specification is None: return True - return self.interface.providedBy(instance._what) + return self.specification.providedBy(instance._what) + + + def __eq__(self, other): + """ + Routed(X) compares equal to Routed(X). + """ + if not isinstance(other, Routed): + return NotImplemented + return self.specification == other.specification + + + def __ne__(self, other): + """ + Routed(X) compares unequal to Routed(Y). 
+ """ + if not isinstance(other, Routed): + return NotImplemented + return self.specification != other.specification @@ -119,6 +147,13 @@ class _To(object): self._what = what + def __repr__(self): + """ + @return: an explanatory string. + """ + return "to({!r}, {!r})".format(self._where, self._what) + + def to(where, what): """ @@ -138,7 +173,6 @@ def to(where, what): -@tube class Router(object): """ A drain with multiple founts that consumes L{Routed}C{(IX)} from its input @@ -154,10 +188,21 @@ class Router(object): def __init__(self, outputType=None): self._out = Out() self._outputType = outputType + @implementer(IDrain) + class NullDrain(object): + inputType = outputType + fount = None + def flowingFrom(self, fount): + beginFlowingFrom(self, fount) + def receive(self, item): + pass + def flowStopped(self, reason): + pass + self.newRoute().flowTo(NullDrain()) self.drain = self._out.drain - def newRoute(self): + def newRoute(self, name=None): """ Create a new route. @@ -168,13 +213,18 @@ class Router(object): to L{Router.drain} should be a L{to} constructed with a value returned from this method as the "where" parameter. + @param name: Give the route a name for debugging purposes. + @type name: native L{str} + @return: L{IFount} """ @receiver(inputType=Routed(self._outputType), - outputType=self._outputType) + outputType=self._outputType, + name=name) def received(item): - if isinstance(item, to): - if item._where is fount: - yield item._what - fount = self._out.newFount().flowTo(received) + if not isinstance(item, _To): + raise TypeError("{0} is not routed".format(item)) + if item._where is fount: + yield item._what + fount = self._out.newFount().flowTo(series(received)) return fount diff --git a/tubes/tube.py b/tubes/tube.py index fe23b49..39de857 100644 --- a/tubes/tube.py +++ b/tubes/tube.py @@ -123,17 +123,20 @@ class _Tubule(object): """ A tube created for the C{@tube} decorator. 
""" - def __init__(self, inputType, outputType, received): + def __init__(self, inputType, outputType, received, name): """ @param inputType: An interface for the input type. @param outputType: an interface for the output type. @param received: a callable to implement C{received}. + + @param name: a string describing this L{_Tubule}. """ self.inputType = inputType self.outputType = outputType self.received = received + self._name = name def started(self): @@ -156,8 +159,15 @@ class _Tubule(object): return () + def __repr__(self): + """ + @return: this L{_Tubule}'s name. + """ + return self._name -def receiver(inputType=None, outputType=None): + + +def receiver(inputType=None, outputType=None, name=None): """ Decorator for a stateless function which receives inputs. @@ -171,12 +181,16 @@ def receiver(inputType=None, outputType=None): @param outputType: The C{outputType} attribute of the resulting L{ITube}. + @param name: a name describing the tubule for it to show as in a C{repr}. + @type name: native L{str} + @return: a stateless tube with the decorated method as its C{received} method. @rtype: L{ITube} """ def decorator(decoratee): - return _Tubule(inputType, outputType, decoratee) + return _Tubule(inputType, outputType, decoratee, + name if name is not None else decoratee.__name__) return decorator
tubes Router is broken Besides the broken docstring for the Router as reported in #21 the Router also generally appears not to work; or am I missing something simple here? see my attempt at creating unit tests for Router: https://github.com/david415/tubes/tree/add_routing_tests.0 https://github.com/david415/tubes/blob/add_routing_tests.0/tubes/test/test_routing.py When the Router's newRoute() method is called this results in: ``` $ trial tubes.test.test_routing tubes.test.test_routing TestBasicRouter test_basic_router ... [ERROR] =============================================================================== [ERROR] Traceback (most recent call last): File "/home/user/tubes/tubes/test/test_routing.py", line 64, in test_basic_router evenOddTube.addRoutes() File "/home/user/tubes/tubes/test/test_routing.py", line 41, in addRoutes self.evenRoute = self.newRoute() File "/home/user/tubes/tubes/routing.py", line 179, in newRoute fount = self._out.newFount().flowTo(received) File "/home/user/tubes/tubes/fan.py", line 253, in flowTo return beginFlowingTo(self, drain) File "/home/user/tubes/tubes/kit.py", line 104, in beginFlowingTo return drain.flowingFrom(fount) exceptions.AttributeError: '_Tubule' object has no attribute 'flowingFrom' tubes.test.test_routing.TestBasicRouter.test_basic_router ------------------------------------------------------------------------------- Ran 1 tests in 0.018s FAILED (errors=1) ```
twisted/tubes
diff --git a/tubes/test/test_routing.py b/tubes/test/test_routing.py new file mode 100644 index 0000000..f0b4225 --- /dev/null +++ b/tubes/test/test_routing.py @@ -0,0 +1,117 @@ +# -*- test-case-name: tubes.test.test_routing -*- +# Copyright (c) Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{tubes.routing}. +""" + +from unittest import TestCase + +from ..routing import Router, to, Routed +from ..tube import series, receiver +from .util import FakeFount, FakeDrain, IFakeOutput, IFakeInput + +if 0: + # Names used by PyDoctor. + from ..itube import IFount + IFount + + + +class RouterTests(TestCase): + """ + Tests for L{Router}. + """ + + def test_twoRoutes(self): + """ + The L{IFount} feeding into a L{Router} may yield L{to} each route + returned from L{Router.newRoute}. + """ + @receiver() + def chooser(item): + if item % 2: + yield to(odd, item) + else: + yield to(even, item) + router = Router() + even = router.newRoute("even") + evens = FakeDrain() + even.flowTo(evens) + odd = router.newRoute("odd") + odds = FakeDrain() + odd.flowTo(odds) + ff = FakeFount() + routeDrain = series(chooser, router.drain) + ff.flowTo(routeDrain) + for x in range(10): + ff.drain.receive(x) + self.assertEqual(odds.received, [1, 3, 5, 7, 9]) + self.assertEqual(evens.received, [0, 2, 4, 6, 8]) + + + def test_routeRepr(self): + """ + It's useful to C{repr} a route for debugging purposes; if we give it a + name, its C{repr} will contain that name. + """ + router = Router() + route = router.newRoute("hello") + self.assertTrue("hello" in repr(route)) + + + def test_defaultTypeChecking(self): + """ + L{Router}'s drain accepts only L{Routed} objects; if no other type is + specified, L{Routed}C{(None)}. 
+ """ + router = Router() + ff = FakeFount(IFakeOutput) + self.assertEqual(router.drain.inputType, Routed(None)) + self.assertRaises(TypeError, ff.flowTo, router.drain) + self.assertEqual(router.newRoute().outputType, None) + + + def test_specifiedTypeChecking(self): + """ + The C{outputType} argument to L{Router}'s constructor specifies the + type of output that its routes will provide, and also the routed type + required as an input. + """ + router = Router(IFakeInput) + incorrect = FakeDrain(IFakeOutput) + correct = FakeDrain(IFakeInput) + self.assertEqual(router.drain.inputType, Routed(IFakeInput)) + self.assertEqual(router.newRoute().outputType, IFakeInput) + self.assertRaises(TypeError, router.newRoute().flowTo, incorrect) + self.assertEqual(router.newRoute().flowTo(correct), None) + correctFount = FakeFount(Routed(IFakeInput)) + incorrectFount = FakeFount(Routed(IFakeOutput)) + self.assertRaises(TypeError, incorrectFount.flowTo, router.drain) + self.assertEquals(None, correctFount.flowTo(router.drain)) + + + +class RoutedTests(TestCase): + """ + Tests for L{Routed}. + """ + + def test_eq(self): + """ + C{==} on L{Routed} is L{True} for equivalent ones, L{False} otherwise. + """ + self.assertEqual(True, Routed(IFakeInput) == Routed(IFakeInput)) + self.assertEqual(False, Routed(IFakeInput) == Routed(IFakeOutput)) + self.assertEqual(False, Routed() == 7) + + + def test_ne(self): + """ + C{==} on L{Routed} is L{False} for equivalent ones, L{True} otherwise. 
+ """ + self.assertEqual(False, Routed(IFakeInput) != Routed(IFakeInput)) + self.assertEqual(True, Routed(IFakeInput) != Routed(IFakeOutput)) + self.assertEqual(True, Routed() != 7) + diff --git a/tubes/test/util.py b/tubes/test/util.py index dd36224..0ebf41b 100644 --- a/tubes/test/util.py +++ b/tubes/test/util.py @@ -77,13 +77,12 @@ class FakeDrain(object): @type stopped: L{list} """ - inputType = None - fount = None - def __init__(self): + def __init__(self, inputType=None): self.received = [] self.stopped = [] + self.inputType = inputType def flowingFrom(self, fount):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 4 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 Automat==24.8.1 characteristic==14.3.0 constantly==23.10.4 coverage==7.8.0 exceptiongroup==1.2.2 hyperlink==21.0.0 idna==3.10 incremental==24.7.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 six==1.17.0 tomli==2.2.1 -e git+https://github.com/twisted/tubes.git@188aad3c9caf5d07e894d1fda6fb5f41b8bfc41a#egg=Tubes Twisted==24.11.0 typing_extensions==4.13.0 zope.interface==7.2
name: tubes channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - automat==24.8.1 - characteristic==14.3.0 - constantly==23.10.4 - coverage==7.8.0 - exceptiongroup==1.2.2 - hyperlink==21.0.0 - idna==3.10 - incremental==24.7.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - six==1.17.0 - tomli==2.2.1 - twisted==24.11.0 - typing-extensions==4.13.0 - zope-interface==7.2 prefix: /opt/conda/envs/tubes
[ "tubes/test/test_routing.py::RouterTests::test_defaultTypeChecking", "tubes/test/test_routing.py::RouterTests::test_routeRepr", "tubes/test/test_routing.py::RouterTests::test_specifiedTypeChecking", "tubes/test/test_routing.py::RouterTests::test_twoRoutes", "tubes/test/test_routing.py::RoutedTests::test_eq", "tubes/test/test_routing.py::RoutedTests::test_ne" ]
[]
[]
[]
MIT License
308
sympy__sympy-10167
d176aeffe835cdcc05dad5339ee6eb06bf59bd46
2015-11-22 04:24:23
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
smichr: @asmeurer , do you have any objections to the change to `fuzzy_bool` which will now output None for values that are neither True nor False. Although this breaks backward compatibility (b/c formerly `fuzzy_bool(x)` would give True) there were no tests that needed this behavior.
diff --git a/sympy/core/logic.py b/sympy/core/logic.py index 077f277c76..d12bee1562 100644 --- a/sympy/core/logic.py +++ b/sympy/core/logic.py @@ -64,11 +64,23 @@ def fuzzy_bool(x): """Return True, False or None according to x. Whereas bool(x) returns True or False, fuzzy_bool allows - for the None value. + for the None value and non-false values (which become None), too. + + Examples + ======== + + >>> from sympy.core.logic import fuzzy_bool + >>> from sympy.abc import x + >>> fuzzy_bool(x), fuzzy_bool(None) + (None, None) + >>> bool(x), bool(None) + (True, False) + """ if x is None: return None - return bool(x) + if x in (True, False): + return bool(x) def fuzzy_and(args): diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index 4915c102aa..3b0ba18a62 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -1396,7 +1396,7 @@ def flatten(arg): args = flatten(args) if len(args) == 0: - raise TypeError("Intersection expected at least one argument") + return S.EmptySet # args can't be ordered for Partition see issue #9608 if 'Partition' not in [type(a).__name__ for a in args]: @@ -1435,6 +1435,72 @@ def __iter__(self): raise ValueError("None of the constituent sets are iterable") + @staticmethod + def _handle_finite_sets(args): + from sympy.core.logic import fuzzy_and, fuzzy_bool + from sympy.core.compatibility import zip_longest + + new_args = [] + fs_args = [] + for s in args: + if s.is_FiniteSet: + fs_args.append(s) + else: + new_args.append(s) + if not fs_args: + return + s = fs_args[0] + fs_args = fs_args[1:] + res = [] + unk = [] + for x in s: + c = fuzzy_and(fuzzy_bool(o.contains(x)) + for o in fs_args + new_args) + if c: + res.append(x) + elif c is None: + unk.append(x) + else: + pass # drop arg + res = FiniteSet( + *res, evaluate=False) if res else S.EmptySet + if unk: + symbolic_s_list = [x for x in s if x.has(Symbol)] + non_symbolic_s = s - FiniteSet( + *symbolic_s_list, evaluate=False) + while fs_args: + v = fs_args.pop() + if all(i == j for i, j in 
zip_longest( + symbolic_s_list, + (x for x in v if x.has(Symbol)))): + # all the symbolic elements of `v` are the same + # as in `s` so remove the non-symbol containing + # expressions from `unk`, since they cannot be + # contained + for x in non_symbolic_s: + if x in unk: + unk.remove(x) + else: + # if only a subset of elements in `s` are + # contained in `v` then remove them from `v` + # and add this as a new arg + contained = [x for x in symbolic_s_list + if v.contains(x) == True] + if contained != symbolic_s_list: + new_args.append( + v - FiniteSet( + *contained, evaluate=False)) + else: + pass # for coverage + + other_sets = Intersection(*new_args) + if not other_sets: + return S.EmptySet # b/c we use evaluate=False below + res += Intersection( + FiniteSet(*unk), + other_sets, evaluate=False) + return res + @staticmethod def reduce(args): """ @@ -1452,52 +1518,10 @@ def reduce(args): if any(s.is_EmptySet for s in args): return S.EmptySet - for s in args: - if s.is_FiniteSet: - # see which elements of the FiniteSet occur within - # all other sets in the intersection - other_args = [a for a in args if a != s] - res = FiniteSet(*[x for x in s if all( - o.contains(x) == True for o in other_args)]) - unk = [x for x in s if any( - o.contains(x) not in (True, False) for o in other_args)] - if unk: - new_other = [] - del_other = [] - for ival, val in enumerate(other_args): - if val.is_FiniteSet: - # collect expressions having symbols - # from `val` and `s` - symbol_in_val = [x for x in val if x.has(Symbol)] - symbol_in_s = [x for x in s if x.has(Symbol)] - del_other.append(ival) - # if expression with symbols are same in `s` and `val` - # then remove the non-symbol containing expressions - # from `unk`, since they can not be contained - if symbol_in_s == symbol_in_val: - syms = FiniteSet(*symbol_in_s, evaluate=False) - non_symbol_in_s = s - syms - s = syms - for x in non_symbol_in_s: - if x in unk: - unk.remove(x) - else: - fin = FiniteSet(* - [x for x in symbol_in_s 
- if val.contains(x) == True], - evaluate=False) - if s != fin: - val = val - fin - new_other.append(val) - - for i in reversed(del_other): - other_args.pop(i) - other_sets = Intersection(*(other_args + new_other)) - if other_sets.is_EmptySet: - return EmptySet() - res += Intersection( - s.func(*unk), other_sets, evaluate=False) - return res + # Handle Finite sets + rv = Intersection._handle_finite_sets(args) + if rv is not None: + return rv # If any of the sets are unions, return a Union of Intersections for s in args: @@ -1789,7 +1813,7 @@ def _intersect(self, other): """ if isinstance(other, self.__class__): return self.__class__(*(self._elements & other._elements)) - return self.__class__(el for el in self if el in other) + return self.__class__(*[el for el in self if el in other]) def _complement(self, other): if isinstance(other, Interval):
Intersection gives wrong result This is one of the tests in the test suite. The following demonstrates that Intersection gives the wrong result. ```python >>> from sympy import * >>> from sympy.abc import m,n,x >>> r = S.Reals >>> args = Tuple(r, FiniteSet(m,n,3), FiniteSet(m,n,x)) >>> reps = dict(m=1,n=2,x=3) >>> args.subs(reps) ((-oo, oo), {1, 2, 3}, {1, 2, 3}) >>> Intersection(_) {1, 2, 3} >>> Intersection(args).subs(reps) {3} ``` I am running tests on my `reduce` branch that fixes this.
sympy/sympy
diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py index a61faac626..cc9065d56c 100644 --- a/sympy/sets/tests/test_sets.py +++ b/sympy/sets/tests/test_sets.py @@ -222,6 +222,7 @@ def test_intersect(): assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \ Union(Interval(0, 1), Interval(2, 2)) + assert FiniteSet(1, 2)._intersect((1, 2, 3)) == FiniteSet(1, 2) assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x) assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \ FiniteSet('ham') @@ -919,7 +920,7 @@ def test_issue_Symbol_inter(): assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \ Intersection(r, FiniteSet(m, z), FiniteSet(n, x)) assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \ - Intersection(r, FiniteSet(x), FiniteSet(3, m, n), evaluate=False) + Intersection(r, FiniteSet(3, m, n), evaluate=False) assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \ Union(FiniteSet(3), Intersection(r, FiniteSet(m, n))) assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "mpmath>=0.19", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 mpmath==1.2.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 -e git+https://github.com/sympy/sympy.git@d176aeffe835cdcc05dad5339ee6eb06bf59bd46#egg=sympy tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - mpmath=1.2.1=py36h06a4308_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/sympy
[ "sympy/sets/tests/test_sets.py::test_intersect", "sympy/sets/tests/test_sets.py::test_issue_Symbol_inter" ]
[]
[ "sympy/sets/tests/test_sets.py::test_interval_arguments", "sympy/sets/tests/test_sets.py::test_interval_symbolic_end_points", "sympy/sets/tests/test_sets.py::test_union", "sympy/sets/tests/test_sets.py::test_difference", "sympy/sets/tests/test_sets.py::test_Complement", "sympy/sets/tests/test_sets.py::test_complement", "sympy/sets/tests/test_sets.py::test_intersection", "sympy/sets/tests/test_sets.py::test_issue_9623", "sympy/sets/tests/test_sets.py::test_is_disjoint", "sympy/sets/tests/test_sets.py::test_ProductSet_of_single_arg_is_arg", "sympy/sets/tests/test_sets.py::test_interval_subs", "sympy/sets/tests/test_sets.py::test_interval_to_mpi", "sympy/sets/tests/test_sets.py::test_measure", "sympy/sets/tests/test_sets.py::test_is_subset", "sympy/sets/tests/test_sets.py::test_is_proper_subset", "sympy/sets/tests/test_sets.py::test_is_superset", "sympy/sets/tests/test_sets.py::test_is_proper_superset", "sympy/sets/tests/test_sets.py::test_contains", "sympy/sets/tests/test_sets.py::test_interval_symbolic", "sympy/sets/tests/test_sets.py::test_union_contains", "sympy/sets/tests/test_sets.py::test_is_number", "sympy/sets/tests/test_sets.py::test_Interval_is_left_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_is_right_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_as_relational", "sympy/sets/tests/test_sets.py::test_Finite_as_relational", "sympy/sets/tests/test_sets.py::test_Union_as_relational", "sympy/sets/tests/test_sets.py::test_Intersection_as_relational", "sympy/sets/tests/test_sets.py::test_EmptySet", "sympy/sets/tests/test_sets.py::test_finite_basic", "sympy/sets/tests/test_sets.py::test_powerset", "sympy/sets/tests/test_sets.py::test_product_basic", "sympy/sets/tests/test_sets.py::test_real", "sympy/sets/tests/test_sets.py::test_supinf", "sympy/sets/tests/test_sets.py::test_universalset", "sympy/sets/tests/test_sets.py::test_Union_of_ProductSets_shares", "sympy/sets/tests/test_sets.py::test_Interval_free_symbols", 
"sympy/sets/tests/test_sets.py::test_image_interval", "sympy/sets/tests/test_sets.py::test_image_piecewise", "sympy/sets/tests/test_sets.py::test_image_FiniteSet", "sympy/sets/tests/test_sets.py::test_image_Union", "sympy/sets/tests/test_sets.py::test_image_EmptySet", "sympy/sets/tests/test_sets.py::test_issue_5724_7680", "sympy/sets/tests/test_sets.py::test_boundary", "sympy/sets/tests/test_sets.py::test_boundary_Union", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet_line", "sympy/sets/tests/test_sets.py::test_is_open", "sympy/sets/tests/test_sets.py::test_is_closed", "sympy/sets/tests/test_sets.py::test_closure", "sympy/sets/tests/test_sets.py::test_interior", "sympy/sets/tests/test_sets.py::test_issue_7841", "sympy/sets/tests/test_sets.py::test_Eq", "sympy/sets/tests/test_sets.py::test_SymmetricDifference", "sympy/sets/tests/test_sets.py::test_issue_9536", "sympy/sets/tests/test_sets.py::test_issue_9637", "sympy/sets/tests/test_sets.py::test_issue_9808", "sympy/sets/tests/test_sets.py::test_issue_9956", "sympy/sets/tests/test_sets.py::test_issue_10113" ]
[]
BSD
309
joblib__joblib-277
484405ccea3cbcbd95e3cf241f15bf3eeb1aa8b6
2015-11-23 15:25:34
40341615cc2600675ce7457d9128fb030f6f89fa
diff --git a/joblib/hashing.py b/joblib/hashing.py index f8a9ee6..93bc5e3 100644 --- a/joblib/hashing.py +++ b/joblib/hashing.py @@ -59,7 +59,8 @@ class Hasher(Pickler): try: self.dump(obj) except pickle.PicklingError as e: - warnings.warn('PicklingError while hashing %r: %r' % (obj, e)) + e.args += ('PicklingError while hashing %r: %r' % (obj, e),) + raise dumps = self.stream.getvalue() self._hash.update(dumps) if return_digest:
Surprising behaviour when one of the cached function arguments is not picklable As reported by @arthurmensch. When one of the arguments is not picklable, the cached function result will only depend on the hash of all the arguments before the non picklable one. A simple snippet to show the problem: ```python import joblib mem = joblib.Memory('/tmp/joblib') @mem.cache() def f(a, b): return b non_picklable = lambda: None print(f(non_picklable, 'first')) print(f(non_picklable, 'second')) ``` Output: ``` /home/lesteve/dev/joblib/joblib/hashing.py:62: UserWarning: PicklingError while hashing {'b': 'first', 'a': <function <lambda> at 0x7f400d7ec8c8>}: PicklingError("Can't pickle <function <lambda> at 0x7f400d7ec8c8>: it's not found as __main__.<lambda>",) warnings.warn('PicklingError while hashing %r: %r' % (obj, e)) ________________________________________________________________________________ [Memory] Calling __main__--tmp-test_hash_non_picklable_arguments.f... f(<function <lambda> at 0x7f400d7ec8c8>, 'first') ________________________________________________________________f - 0.0s, 0.0min first /home/lesteve/dev/joblib/joblib/hashing.py:62: UserWarning: PicklingError while hashing {'b': 'second', 'a': <function <lambda> at 0x7f400d7ec8c8>}: PicklingError("Can't pickle <function <lambda> at 0x7f400d7ec8c8>: it's not found as __main__.<lambda>",) warnings.warn('PicklingError while hashing %r: %r' % (obj, e)) first ``` Why not just raise an exception in this case rather than returning a result with a warning that is almost certain to be ignored by the user ? @GaelVaroquaux @ogrisel.
joblib/joblib
diff --git a/joblib/test/test_hashing.py b/joblib/test/test_hashing.py index f0ce0eb..88407d0 100644 --- a/joblib/test/test_hashing.py +++ b/joblib/test/test_hashing.py @@ -23,6 +23,7 @@ from nose.tools import assert_equal from joblib.hashing import hash, PY3 from joblib.func_inspect import filter_args from joblib.memory import Memory +from joblib.testing import assert_raises_regex from joblib.test.test_memory import env as test_memory_env from joblib.test.test_memory import setup_module as test_memory_setup_func @@ -429,3 +430,12 @@ def test_hashes_stay_the_same_with_numpy_objects(): for to_hash, expected in zip(to_hash_list, expected_list): yield assert_equal, hash(to_hash), expected + + +def test_hashing_pickling_error(): + def non_picklable(): + return 42 + + assert_raises_regex(pickle.PicklingError, + 'PicklingError while hashing', + hash, non_picklable) diff --git a/joblib/testing.py b/joblib/testing.py index 8555a5a..e5cbae5 100644 --- a/joblib/testing.py +++ b/joblib/testing.py @@ -5,7 +5,7 @@ Helper for testing. import sys import warnings import os.path - +import re def warnings_to_stdout(): """ Redirect all warnings to stdout. @@ -17,3 +17,30 @@ def warnings_to_stdout(): warnings.showwarning = showwarning #warnings.simplefilter('always') + + +try: + from nose.tools import assert_raises_regex +except ImportError: + # For Python 2.7 + try: + from nose.tools import assert_raises_regexp as assert_raises_regex + except ImportError: + # for Python 2.6 + def assert_raises_regex(expected_exception, expected_regexp, + callable_obj=None, *args, **kwargs): + """Helper function to check for message patterns in exceptions""" + + not_raised = False + try: + callable_obj(*args, **kwargs) + not_raised = True + except Exception as e: + error_message = str(e) + if not re.compile(expected_regexp).search(error_message): + raise AssertionError("Error message should match pattern " + "%r. %r does not." 
% + (expected_regexp, error_message)) + if not_raised: + raise AssertionError("Should have raised %r" % + expected_exception(expected_regexp))
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "nose", "coverage", "numpy>=1.6.1", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 coverage==6.2 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/joblib/joblib.git@484405ccea3cbcbd95e3cf241f15bf3eeb1aa8b6#egg=joblib nose==1.3.7 numpy==1.19.5 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: joblib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/joblib
[ "joblib/test/test_hashing.py::test_hashing_pickling_error" ]
[]
[ "joblib/test/test_hashing.py::test_memory_setup_func", "joblib/test/test_hashing.py::test_memory_teardown_func", "joblib/test/test_hashing.py::test_hash_methods", "joblib/test/test_hashing.py::test_numpy_datetime_array", "joblib/test/test_hashing.py::test_hash_numpy_noncontiguous", "joblib/test/test_hashing.py::test_hash_numpy_performance", "joblib/test/test_hashing.py::test_bound_methods_hash", "joblib/test/test_hashing.py::test_bound_cached_methods_hash", "joblib/test/test_hashing.py::test_hash_object_dtype", "joblib/test/test_hashing.py::test_numpy_scalar", "joblib/test/test_hashing.py::test_dict_hash", "joblib/test/test_hashing.py::test_set_hash", "joblib/test/test_hashing.py::test_string", "joblib/test/test_hashing.py::test_dtype" ]
[]
BSD 3-Clause "New" or "Revised" License
310
mozilla__puente-52
a7d648b09a9b28feafdec48492aa1722d5add9ff
2015-11-23 22:58:31
f78d702e0d1376425d8d613a6573a896fc8d11a1
diff --git a/puente/commands.py b/puente/commands.py index a36d035..c21cdd3 100644 --- a/puente/commands.py +++ b/puente/commands.py @@ -129,10 +129,11 @@ def extract_command(outputdir, domain_methods, text_domain, keywords, print('Done') -def merge_command(create, base_dir, domain_methods, languages): +def merge_command(create, backup, base_dir, domain_methods, languages): """ :arg create: whether or not to create directories if they don't exist + :arg backup: whether or not to create backup .po files :arg base_dir: BASE_DIR setting :arg domain_methods: DOMAIN_METHODS setting :arg languages: LANGUAGES setting @@ -213,6 +214,7 @@ def merge_command(create, base_dir, domain_methods, languages): 'msgmerge', '--update', '--width=200', + '--backup=%s' % ('simple' if backup else 'off'), domain_po, '-' ] diff --git a/puente/management/commands/merge.py b/puente/management/commands/merge.py index 74de7d9..acf9b26 100644 --- a/puente/management/commands/merge.py +++ b/puente/management/commands/merge.py @@ -32,11 +32,17 @@ class Command(BaseCommand): action='store_true', dest='create', default=False, help='Create locale subdirectories' ), + make_option( + '-b', '--backup', + action='store_true', dest='backup', default=False, + help='Create backup files of .po files' + ), ) def handle(self, *args, **options): return merge_command( create=options.get('create'), + backup=options.get('backup'), base_dir=get_setting('BASE_DIR'), domain_methods=get_setting('DOMAIN_METHODS'), languages=getattr(settings, 'LANGUAGES', [])
Add option to not create merge backup According to https://www.gnu.org/software/gettext/manual/html_node/msgmerge-Invocation.html that should be `--backup=off` to the `msgmerge` tool.
mozilla/puente
diff --git a/tests/test_merge.py b/tests/test_merge.py index 29e1456..60b46f1 100644 --- a/tests/test_merge.py +++ b/tests/test_merge.py @@ -65,6 +65,7 @@ class TestMergecommand: merge_command( create=True, + backup=True, base_dir=str(tmpdir), domain_methods={ 'django': [ @@ -83,6 +84,7 @@ class TestMergecommand: with pytest.raises(CommandError): merge_command( create=True, + backup=True, base_dir=str(tmpdir), domain_methods={ 'django': [
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 2 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-pythonpath", "pytest-django" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 asgiref==3.8.1 attrs==25.3.0 babel==2.17.0 backports.tarfile==1.2.0 build==1.2.2.post1 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 check-manifest==0.50 colorama==0.4.6 cryptography==44.0.2 distlib==0.3.9 Django==4.2.20 django-jinja==2.11.0 docutils==0.21.2 filelock==3.18.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 -e git+https://github.com/mozilla/puente.git@a7d648b09a9b28feafdec48492aa1722d5add9ff#egg=puente py==1.11.0 pycparser==2.22 Pygments==2.19.1 pyproject-api==1.9.0 pyproject_hooks==1.2.0 pytest==6.2.5 pytest-django==4.5.2 pytest-pythonpath==0.7.4 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 sqlparse==0.5.3 swebench_matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==2.2.1 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3 zipp==3.21.0
name: puente channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - asgiref==3.8.1 - attrs==25.3.0 - babel==2.17.0 - backports-tarfile==1.2.0 - build==1.2.2.post1 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - check-manifest==0.50 - colorama==0.4.6 - cryptography==44.0.2 - distlib==0.3.9 - django==4.2.20 - django-jinja==2.11.0 - docutils==0.21.2 - filelock==3.18.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - py==1.11.0 - pycparser==2.22 - pygments==2.19.1 - pyproject-api==1.9.0 - pyproject-hooks==1.2.0 - pytest==6.2.5 - pytest-django==4.5.2 - pytest-pythonpath==0.7.4 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - sqlparse==0.5.3 - 
swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==2.2.1 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/puente
[ "tests/test_merge.py::TestMergecommand::test_missing_pot_file" ]
[ "tests/test_merge.py::TestManageMerge::test_help", "tests/test_merge.py::TestMergecommand::test_basic" ]
[]
[]
BSD 3-Clause "New" or "Revised" License
311
rmcgibbo__sphinxcontrib-autodoc_doxygen-9
412da0340505383830878da66c809e6466d017e0
2015-11-24 00:46:24
412da0340505383830878da66c809e6466d017e0
diff --git a/sphinxcontrib/autodoc_doxygen/autodoc.py b/sphinxcontrib/autodoc_doxygen/autodoc.py index 6bfbdd6..c32508b 100644 --- a/sphinxcontrib/autodoc_doxygen/autodoc.py +++ b/sphinxcontrib/autodoc_doxygen/autodoc.py @@ -154,7 +154,10 @@ class DoxygenMethodDocumenter(DoxygenDocumenter): return doc def format_name(self): - return self.object.find('definition').text + # return self.object.find('definition').text + rtype = '\n'.join(format_xml_paragraph(self.object.find('type'))).strip() + return (rtype and (rtype + ' ') or '') + self.objname + def format_signature(self): args = self.object.find('argsstring').text diff --git a/sphinxcontrib/autodoc_doxygen/xmlutils.py b/sphinxcontrib/autodoc_doxygen/xmlutils.py index 07567a4..31df3ad 100644 --- a/sphinxcontrib/autodoc_doxygen/xmlutils.py +++ b/sphinxcontrib/autodoc_doxygen/xmlutils.py @@ -60,7 +60,9 @@ class _DoxygenXmlParagraphFormatter(object): val.extend((' <', real_name, '>`')) else: val.append('`') - val.append(node.tail) + if node.tail is not None: + val.append(node.tail) + self.lines[-1] += ''.join(val) def visit_para(self, node):
Omit namespace and class from method names I did this by changing line 134 of autodoc.py from return (rtype and (rtype + ' ') or '') + self.modname + '::' + self.objpath to return (rtype and (rtype + ' ') or '') + self.objpath You mentioned before that this caused links to methods to be broken, but as far as I can tell it seems to work fine. By the way, all the examples at https://rawgit.com/rmcgibbo/sphinxcontrib-autodoc_doxygen/gh-pages/autodoxyclass.html include the option `:members:`. When I include that, I get the error message ERROR: An option to autodoxyclass is either unknown or has an invalid value: 'members' If I omit it, everything seems to work correctly.
rmcgibbo/sphinxcontrib-autodoc_doxygen
diff --git a/tests/test_format_xml_paragraph.py b/tests/test_format_xml_paragraph.py index 66fb8e7..b795b7b 100644 --- a/tests/test_format_xml_paragraph.py +++ b/tests/test_format_xml_paragraph.py @@ -76,4 +76,11 @@ The above example has two problems. First, it does not respect distance constrai integrator.addConstrainVelocities(); """ - assert '\n'.join(format_xml_paragraph(node)) == expected \ No newline at end of file + assert '\n'.join(format_xml_paragraph(node)) == expected + + +def test_3(): + node = ET.fromstring(""" +<type><ref refid="classOpenMM_1_1CustomHbondForce_1afefd9143292586209274d8e355d8cba1" kindref="member">NonbondedMethod</ref></type>""") + expected = ':cpp:any:`NonbondedMethod`' + assert '\n'.join(format_xml_paragraph(node)) == expected
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 2 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 docutils==0.21.2 exceptiongroup==1.2.2 iniconfig==2.1.0 Jinja2==3.1.6 lxml==5.3.1 MarkupSafe==3.0.2 packaging==24.2 pluggy==1.5.0 Pygments==2.19.1 pytest==8.3.5 six==1.17.0 snowballstemmer==2.2.0 Sphinx==1.3.1 sphinx-rtd-theme==0.1.9 -e git+https://github.com/rmcgibbo/sphinxcontrib-autodoc_doxygen.git@412da0340505383830878da66c809e6466d017e0#egg=sphinxcontrib_autodoc_doxygen tomli==2.2.1
name: sphinxcontrib-autodoc_doxygen channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - jinja2==3.1.6 - lxml==5.3.1 - markupsafe==3.0.2 - packaging==24.2 - pluggy==1.5.0 - pygments==2.19.1 - pytest==8.3.5 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==1.3.1 - sphinx-rtd-theme==0.1.9 - tomli==2.2.1 prefix: /opt/conda/envs/sphinxcontrib-autodoc_doxygen
[ "tests/test_format_xml_paragraph.py::test_3" ]
[]
[ "tests/test_format_xml_paragraph.py::test_1", "tests/test_format_xml_paragraph.py::test_2" ]
[]
MIT License
312
docker__docker-py-861
1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf
2015-11-24 02:33:41
1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf
diff --git a/docker/api/image.py b/docker/api/image.py index f891e210..8493b38d 100644 --- a/docker/api/image.py +++ b/docker/api/image.py @@ -158,8 +158,6 @@ class ImageApiMixin(object): if not tag: repository, tag = utils.parse_repository_tag(repository) registry, repo_name = auth.resolve_repository_name(repository) - if repo_name.count(":") == 1: - repository, tag = repository.rsplit(":", 1) params = { 'tag': tag, @@ -174,7 +172,8 @@ class ImageApiMixin(object): log.debug('Looking for auth config') if not self._auth_configs: log.debug( - "No auth config in memory - loading from filesystem") + "No auth config in memory - loading from filesystem" + ) self._auth_configs = auth.load_config() authcfg = auth.resolve_authconfig(self._auth_configs, registry) # Do not fail here if no authentication exists for this diff --git a/docker/auth/auth.py b/docker/auth/auth.py index 416dd7c4..f771dedd 100644 --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -16,11 +16,9 @@ import base64 import json import logging import os -import warnings import six -from .. import constants from .. import errors INDEX_NAME = 'index.docker.io' @@ -31,31 +29,29 @@ LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg' log = logging.getLogger(__name__) -def resolve_repository_name(repo_name, insecure=False): - if insecure: - warnings.warn( - constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format( - 'resolve_repository_name()' - ), DeprecationWarning - ) - +def resolve_repository_name(repo_name): if '://' in repo_name: raise errors.InvalidRepository( - 'Repository name cannot contain a scheme ({0})'.format(repo_name)) - parts = repo_name.split('/', 1) - if '.' 
not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost': - # This is a docker index repo (ex: foo/bar or ubuntu) - return INDEX_NAME, repo_name - if len(parts) < 2: - raise errors.InvalidRepository( - 'Invalid repository name ({0})'.format(repo_name)) + 'Repository name cannot contain a scheme ({0})'.format(repo_name) + ) - if 'index.docker.io' in parts[0]: + index_name, remote_name = split_repo_name(repo_name) + if index_name[0] == '-' or index_name[-1] == '-': raise errors.InvalidRepository( - 'Invalid repository name, try "{0}" instead'.format(parts[1]) + 'Invalid index name ({0}). Cannot begin or end with a' + ' hyphen.'.format(index_name) ) + return index_name, remote_name + - return parts[0], parts[1] +def split_repo_name(repo_name): + parts = repo_name.split('/', 1) + if len(parts) == 1 or ( + '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost' + ): + # This is a docker index repo (ex: username/foobar or ubuntu) + return INDEX_NAME, repo_name + return tuple(parts) def resolve_authconfig(authconfig, registry=None): diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 366f8696..560ee8e2 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -283,16 +283,14 @@ def convert_volume_binds(binds): return result -def parse_repository_tag(repo): - column_index = repo.rfind(':') - if column_index < 0: - return repo, None - tag = repo[column_index + 1:] - slash_index = tag.find('/') - if slash_index < 0: - return repo[:column_index], tag - - return repo, None +def parse_repository_tag(repo_name): + parts = repo_name.rsplit('@', 1) + if len(parts) == 2: + return tuple(parts) + parts = repo_name.rsplit(':', 1) + if len(parts) == 2 and '/' not in parts[1]: + return tuple(parts) + return repo_name, None # Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
Can't pull images with . In name. Docker images that have a `.` in their name cannot be pulled with docker-py. This is a result of: https://github.com/docker/docker-py/blob/master/docker/auth/auth.py#L46
docker/docker-py
diff --git a/tests/unit/auth_test.py b/tests/unit/auth_test.py index 67830381..8e0b1d43 100644 --- a/tests/unit/auth_test.py +++ b/tests/unit/auth_test.py @@ -9,6 +9,7 @@ import shutil import tempfile from docker import auth +from docker import errors from .. import base @@ -29,25 +30,31 @@ class RegressionTest(base.BaseTestCase): assert b'_' in encoded -class ResolveAuthTest(base.BaseTestCase): - auth_config = { - 'https://index.docker.io/v1/': {'auth': 'indexuser'}, - 'my.registry.net': {'auth': 'privateuser'}, - 'http://legacy.registry.url/v1/': {'auth': 'legacyauth'} - } - +class ResolveRepositoryNameTest(base.BaseTestCase): def test_resolve_repository_name_hub_library_image(self): self.assertEqual( auth.resolve_repository_name('image'), ('index.docker.io', 'image'), ) + def test_resolve_repository_name_dotted_hub_library_image(self): + self.assertEqual( + auth.resolve_repository_name('image.valid'), + ('index.docker.io', 'image.valid') + ) + def test_resolve_repository_name_hub_image(self): self.assertEqual( auth.resolve_repository_name('username/image'), ('index.docker.io', 'username/image'), ) + def test_explicit_hub_index_library_image(self): + self.assertEqual( + auth.resolve_repository_name('index.docker.io/image'), + ('index.docker.io', 'image') + ) + def test_resolve_repository_name_private_registry(self): self.assertEqual( auth.resolve_repository_name('my.registry.net/image'), @@ -90,6 +97,20 @@ class ResolveAuthTest(base.BaseTestCase): ('localhost', 'username/image'), ) + def test_invalid_index_name(self): + self.assertRaises( + errors.InvalidRepository, + lambda: auth.resolve_repository_name('-gecko.com/image') + ) + + +class ResolveAuthTest(base.BaseTestCase): + auth_config = { + 'https://index.docker.io/v1/': {'auth': 'indexuser'}, + 'my.registry.net': {'auth': 'privateuser'}, + 'http://legacy.registry.url/v1/': {'auth': 'legacyauth'} + } + def test_resolve_authconfig_hostname_only(self): self.assertEqual( auth.resolve_authconfig(self.auth_config, 
'my.registry.net'), diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 3c9f6e2f..57ad4435 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -352,23 +352,55 @@ class ParseHostTest(base.BaseTestCase): assert parse_host(val, 'win32') == tcp_port +class ParseRepositoryTagTest(base.BaseTestCase): + sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + + def test_index_image_no_tag(self): + self.assertEqual( + parse_repository_tag("root"), ("root", None) + ) + + def test_index_image_tag(self): + self.assertEqual( + parse_repository_tag("root:tag"), ("root", "tag") + ) + + def test_index_user_image_no_tag(self): + self.assertEqual( + parse_repository_tag("user/repo"), ("user/repo", None) + ) + + def test_index_user_image_tag(self): + self.assertEqual( + parse_repository_tag("user/repo:tag"), ("user/repo", "tag") + ) + + def test_private_reg_image_no_tag(self): + self.assertEqual( + parse_repository_tag("url:5000/repo"), ("url:5000/repo", None) + ) + + def test_private_reg_image_tag(self): + self.assertEqual( + parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag") + ) + + def test_index_image_sha(self): + self.assertEqual( + parse_repository_tag("root@sha256:{0}".format(self.sha)), + ("root", "sha256:{0}".format(self.sha)) + ) + + def test_private_reg_image_sha(self): + self.assertEqual( + parse_repository_tag("url:5000/repo@sha256:{0}".format(self.sha)), + ("url:5000/repo", "sha256:{0}".format(self.sha)) + ) + + class UtilsTest(base.BaseTestCase): longMessage = True - def test_parse_repository_tag(self): - self.assertEqual(parse_repository_tag("root"), - ("root", None)) - self.assertEqual(parse_repository_tag("root:tag"), - ("root", "tag")) - self.assertEqual(parse_repository_tag("user/repo"), - ("user/repo", None)) - self.assertEqual(parse_repository_tag("user/repo:tag"), - ("user/repo", "tag")) - self.assertEqual(parse_repository_tag("url:5000/repo"), - ("url:5000/repo", None)) - 
self.assertEqual(parse_repository_tag("url:5000/repo:tag"), - ("url:5000/repo", "tag")) - def test_parse_bytes(self): self.assertEqual(parse_bytes("512MB"), (536870912)) self.assertEqual(parse_bytes("512M"), (536870912))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 3 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 -e git+https://github.com/docker/docker-py.git@1ca2bc58f0cf2e2cdda2734395bd3e7ad9b178bf#egg=docker_py exceptiongroup==1.2.2 flake8==7.2.0 iniconfig==2.1.0 mccabe==0.7.0 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.2 pytest==8.3.5 pytest-cov==6.0.0 requests==2.5.3 six==1.17.0 tomli==2.2.1 websocket_client==0.32.0
name: docker-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - flake8==7.2.0 - iniconfig==2.1.0 - mccabe==0.7.0 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.2 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.5.3 - six==1.17.0 - tomli==2.2.1 - websocket-client==0.32.0 prefix: /opt/conda/envs/docker-py
[ "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_explicit_hub_index_library_image", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_invalid_index_name", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_dotted_hub_library_image", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_sha", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_sha" ]
[]
[ "tests/unit/auth_test.py::RegressionTest::test_803_urlsafe_encode", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_hub_image", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_hub_library_image", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_localhost", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_localhost_with_username", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_no_dots_but_port", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_no_dots_but_port_and_username", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_private_registry", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_private_registry_with_port", "tests/unit/auth_test.py::ResolveRepositoryNameTest::test_resolve_repository_name_private_registry_with_username", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_default_explicit_none", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_default_registry", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_fully_explicit", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_hostname_only", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_legacy_config", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_match", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_trailing_slash", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_wrong_insecure_proto", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_path_wrong_secure_proto", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_no_protocol", 
"tests/unit/auth_test.py::ResolveAuthTest::test_resolve_authconfig_path_wrong_proto", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_hub_image", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_library_image", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_private_registry", "tests/unit/auth_test.py::ResolveAuthTest::test_resolve_registry_and_auth_unauthenticated_registry", "tests/unit/auth_test.py::LoadConfigTest::test_load_config", "tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env", "tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env_utf8", "tests/unit/auth_test.py::LoadConfigTest::test_load_config_custom_config_env_with_auths", "tests/unit/auth_test.py::LoadConfigTest::test_load_config_no_file", "tests/unit/auth_test.py::LoadConfigTest::test_load_config_with_random_name", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_kill_disable", "tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit", "tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals", "tests/unit/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit", "tests/unit/utils_test.py::UlimitTest::test_ulimit_invalid_type", "tests/unit/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig", "tests/unit/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig", "tests/unit/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type", 
"tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty", "tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path", "tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_compact", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_complete", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_empty", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_list", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_no_mode", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_bytes_input", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_unicode_input", "tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_commented_line", "tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_invalid_line", "tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_proper", "tests/unit/utils_test.py::ParseHostTest::test_parse_host", "tests/unit/utils_test.py::ParseHostTest::test_parse_host_empty_value", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_no_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_no_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_no_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_tag", "tests/unit/utils_test.py::UtilsTest::test_convert_filters", "tests/unit/utils_test.py::UtilsTest::test_decode_json_header", "tests/unit/utils_test.py::UtilsTest::test_parse_bytes", "tests/unit/utils_test.py::SplitCommandTest::test_split_command_with_unicode", 
"tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_one_port", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_port_range", "tests/unit/utils_test.py::PortsTest::test_host_only_with_colon", "tests/unit/utils_test.py::PortsTest::test_non_matching_length_port_ranges", "tests/unit/utils_test.py::PortsTest::test_port_and_range_invalid", "tests/unit/utils_test.py::PortsTest::test_port_only_with_colon", "tests/unit/utils_test.py::PortsTest::test_split_port_invalid", "tests/unit/utils_test.py::PortsTest::test_split_port_no_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_no_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_with_protocol", "tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip", "tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port", "tests/unit/utils_test.py::PortsTest::test_split_port_with_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_with_protocol", "tests/unit/utils_test.py::ExcludePathsTest::test_directory", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_single_exception", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception", 
"tests/unit/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile", "tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore", "tests/unit/utils_test.py::ExcludePathsTest::test_no_dupes", "tests/unit/utils_test.py::ExcludePathsTest::test_no_excludes", "tests/unit/utils_test.py::ExcludePathsTest::test_question_mark", "tests/unit/utils_test.py::ExcludePathsTest::test_single_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash", "tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_subdirectory", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_exclude", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_end", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_start", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_exception", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception", "tests/unit/utils_test.py::TarTest::test_tar_with_directory_symlinks", "tests/unit/utils_test.py::TarTest::test_tar_with_empty_directory", "tests/unit/utils_test.py::TarTest::test_tar_with_excludes", "tests/unit/utils_test.py::TarTest::test_tar_with_file_symlinks" ]
[]
Apache License 2.0
313
docker__docker-py-863
28864df27b2cf289478d5fa9d5ca27a9f0daa9a8
2015-11-24 12:06:48
2f2d50d0c7be5882b150f6ff3bae31d469720e5b
aanand: Nice. I agree with @kanzure's comments, plus it'd be good to document the logic of `should_include`. thomasboyt: @aanand cool, added some docs/comments! aanand: Thanks! I think this can be squashed to one commit.
diff --git a/docker/utils/utils.py b/docker/utils/utils.py index 9c4bb477..762b39a4 100644 --- a/docker/utils/utils.py +++ b/docker/utils/utils.py @@ -107,38 +107,68 @@ def exclude_paths(root, patterns, dockerfile=None): exclude_patterns = list(set(patterns) - set(exceptions)) - all_paths = get_paths(root) - - # Remove all paths that are matched by any exclusion pattern - paths = [ - p for p in all_paths - if not any(match_path(p, pattern) for pattern in exclude_patterns) - ] - - # Add back the set of paths that are matched by any inclusion pattern. - # Include parent dirs - if we add back 'foo/bar', add 'foo' as well - for p in all_paths: - if any(match_path(p, pattern) for pattern in include_patterns): - components = p.split('/') - paths += [ - '/'.join(components[:end]) - for end in range(1, len(components) + 1) - ] + paths = get_paths(root, exclude_patterns, include_patterns, + has_exceptions=len(exceptions) > 0) return set(paths) -def get_paths(root): +def should_include(path, exclude_patterns, include_patterns): + """ + Given a path, a list of exclude patterns, and a list of inclusion patterns: + + 1. Returns True if the path doesn't match any exclusion pattern + 2. Returns False if the path matches an exclusion pattern and doesn't match + an inclusion pattern + 3. Returns true if the path matches an exclusion pattern and matches an + inclusion pattern + """ + for pattern in exclude_patterns: + if match_path(path, pattern): + for pattern in include_patterns: + if match_path(path, pattern): + return True + return False + return True + + +def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False): paths = [] - for parent, dirs, files in os.walk(root, followlinks=False): + for parent, dirs, files in os.walk(root, topdown=True, followlinks=False): parent = os.path.relpath(parent, root) if parent == '.': parent = '' + + # If exception rules exist, we can't skip recursing into ignored + # directories, as we need to look for exceptions in them. 
+ # + # It may be possible to optimize this further for exception patterns + # that *couldn't* match within ignored directores. + # + # This matches the current docker logic (as of 2015-11-24): + # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557 + + if not has_exceptions: + + # Remove excluded patterns from the list of directories to traverse + # by mutating the dirs we're iterating over. + # This looks strange, but is considered the correct way to skip + # traversal. See https://docs.python.org/2/library/os.html#os.walk + + dirs[:] = [d for d in dirs if + should_include(os.path.join(parent, d), + exclude_patterns, include_patterns)] + for path in dirs: - paths.append(os.path.join(parent, path)) + if should_include(os.path.join(parent, path), + exclude_patterns, include_patterns): + paths.append(os.path.join(parent, path)) + for path in files: - paths.append(os.path.join(parent, path)) + if should_include(os.path.join(parent, path), + exclude_patterns, include_patterns): + paths.append(os.path.join(parent, path)) return paths
dockerignore implementation is relatively slow compared to Docker's implementation I ran into an issue in a project where my builds - run through `docker-compose` - seemed to be taking an awfully long time (around ~60 seconds) during the context build/upload stage. `strace` showed a ton of time was being spent `stat()`ing files that were included in my `.dockerignore` rules, which I found curious. Oddly, when I simply used `docker build` to build the container, I didn't have this issue, and context build/upload took about ~3-5 seconds. I couldn't figure out what was going wrong, so I investigated `docker-py`, and found that almost all of my execution time was spent in [this `get_paths` call](https://github.com/docker/docker-py/blob/master/docker/utils/utils.py#L110). It appears that the difference in execution time is because docker-py's implementation of dockerignore/tar exclusion is far slower than Docker's: Docker's implementation of the dockerignore exclusion algorithm, (seen [here](https://github.com/docker/docker/blob/master/pkg/archive/archive.go#L518)), walks through each folder, but [does not descend into a directory if it matched an exclusion pattern](https://github.com/docker/docker/blob/master/pkg/archive/archive.go#L556). Meanwhile, docker-py first gets an array of *every single file in the context folder*, and then applies a filter to the array. This seems to be what is causing the massive difference in execution time when I build my project - docker-py is iterating over thousands of files that Docker correctly ignores. I started on a fix, using what I believe are the same rules as Docker's algorithm: https://github.com/thomasboyt/docker-py/commit/9f302f6721bb8492140cf5b218a80d62a2b62e19 This runs just as fast as Docker's implementation, but doesn't fully implement exception rules (e.g. `!foo`), leading it to fail a few tests. 
Before I go through and add this feature, I wanted to confirm that I'm on the right path (and that no one else has a better solution/algorithm to apply).
docker/docker-py
diff --git a/tests/integration/build_test.py b/tests/integration/build_test.py index 011ddc3e..26164ae0 100644 --- a/tests/integration/build_test.py +++ b/tests/integration/build_test.py @@ -65,6 +65,7 @@ class BuildTest(helpers.BaseTestCase): 'ignored', 'Dockerfile', '.dockerignore', + '!ignored/subdir/excepted-file', '', # empty line ])) @@ -76,6 +77,9 @@ class BuildTest(helpers.BaseTestCase): with open(os.path.join(subdir, 'file'), 'w') as f: f.write("this file should be ignored") + with open(os.path.join(subdir, 'excepted-file'), 'w') as f: + f.write("this file should not be ignored") + tag = 'docker-py-test-build-with-dockerignore' stream = self.client.build( path=base_dir, @@ -84,7 +88,7 @@ class BuildTest(helpers.BaseTestCase): for chunk in stream: pass - c = self.client.create_container(tag, ['ls', '-1A', '/test']) + c = self.client.create_container(tag, ['find', '/test', '-type', 'f']) self.client.start(c) self.client.wait(c) logs = self.client.logs(c) @@ -93,8 +97,9 @@ class BuildTest(helpers.BaseTestCase): logs = logs.decode('utf-8') self.assertEqual( - list(filter(None, logs.split('\n'))), - ['not-ignored'], + sorted(list(filter(None, logs.split('\n')))), + sorted(['/test/ignored/subdir/excepted-file', + '/test/not-ignored']), ) @requires_api_version('1.21') diff --git a/tests/unit/utils_test.py b/tests/unit/utils_test.py index 57ad4435..a68e1e78 100644 --- a/tests/unit/utils_test.py +++ b/tests/unit/utils_test.py @@ -671,17 +671,17 @@ class ExcludePathsTest(base.BaseTestCase): def test_directory_with_single_exception(self): assert self.exclude(['foo', '!foo/bar/a.py']) == self.all_paths - set([ - 'foo/a.py', 'foo/b.py', + 'foo/a.py', 'foo/b.py', 'foo', 'foo/bar' ]) def test_directory_with_subdir_exception(self): assert self.exclude(['foo', '!foo/bar']) == self.all_paths - set([ - 'foo/a.py', 'foo/b.py', + 'foo/a.py', 'foo/b.py', 'foo' ]) def test_directory_with_wildcard_exception(self): assert self.exclude(['foo', '!foo/*.py']) == self.all_paths - 
set([ - 'foo/bar', 'foo/bar/a.py', + 'foo/bar', 'foo/bar/a.py', 'foo' ]) def test_subdirectory(self):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
1.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 -e git+https://github.com/docker/docker-py.git@28864df27b2cf289478d5fa9d5ca27a9f0daa9a8#egg=docker_py exceptiongroup==1.2.2 execnet==2.1.1 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-asyncio==0.26.0 pytest-cov==6.0.0 pytest-mock==3.14.0 pytest-xdist==3.6.1 requests==2.5.3 six==1.17.0 tomli==2.2.1 typing_extensions==4.13.0 websocket_client==0.32.0
name: docker-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - execnet==2.1.1 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-asyncio==0.26.0 - pytest-cov==6.0.0 - pytest-mock==3.14.0 - pytest-xdist==3.6.1 - requests==2.5.3 - six==1.17.0 - tomli==2.2.1 - typing-extensions==4.13.0 - websocket-client==0.32.0 prefix: /opt/conda/envs/docker-py
[ "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_single_exception", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_subdir_exception", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_wildcard_exception" ]
[]
[ "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_invalid_cpu_cfs_types", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_no_options_newer_api_version", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_period", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_cpu_quota", "tests/unit/utils_test.py::HostConfigTest::test_create_host_config_with_oom_kill_disable", "tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit", "tests/unit/utils_test.py::UlimitTest::test_create_host_config_dict_ulimit_capitals", "tests/unit/utils_test.py::UlimitTest::test_create_host_config_obj_ulimit", "tests/unit/utils_test.py::UlimitTest::test_ulimit_invalid_type", "tests/unit/utils_test.py::LogConfigTest::test_create_host_config_dict_logconfig", "tests/unit/utils_test.py::LogConfigTest::test_create_host_config_obj_logconfig", "tests/unit/utils_test.py::LogConfigTest::test_logconfig_invalid_config_type", "tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_empty", "tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_no_cert_path", "tests/unit/utils_test.py::KwargsFromEnvTest::test_kwargs_from_env_tls", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_compact", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_complete", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_empty", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_list", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_no_mode", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_bytes_input", "tests/unit/utils_test.py::ConverVolumeBindsTest::test_convert_volume_binds_unicode_unicode_input", 
"tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_commented_line", "tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_invalid_line", "tests/unit/utils_test.py::ParseEnvFileTest::test_parse_env_file_proper", "tests/unit/utils_test.py::ParseHostTest::test_parse_host", "tests/unit/utils_test.py::ParseHostTest::test_parse_host_empty_value", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_no_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_sha", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_image_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_no_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_index_user_image_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_no_tag", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_sha", "tests/unit/utils_test.py::ParseRepositoryTagTest::test_private_reg_image_tag", "tests/unit/utils_test.py::UtilsTest::test_convert_filters", "tests/unit/utils_test.py::UtilsTest::test_decode_json_header", "tests/unit/utils_test.py::UtilsTest::test_parse_bytes", "tests/unit/utils_test.py::SplitCommandTest::test_split_command_with_unicode", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_port_ranges", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_matching_internal_ports", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_port_ranges", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_nonmatching_internal_ports", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_one_port", "tests/unit/utils_test.py::PortsTest::test_build_port_bindings_with_port_range", "tests/unit/utils_test.py::PortsTest::test_host_only_with_colon", "tests/unit/utils_test.py::PortsTest::test_non_matching_length_port_ranges", 
"tests/unit/utils_test.py::PortsTest::test_port_and_range_invalid", "tests/unit/utils_test.py::PortsTest::test_port_only_with_colon", "tests/unit/utils_test.py::PortsTest::test_split_port_invalid", "tests/unit/utils_test.py::PortsTest::test_split_port_no_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_no_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_ip_no_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_with_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_range_with_protocol", "tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip", "tests/unit/utils_test.py::PortsTest::test_split_port_with_host_ip_no_port", "tests/unit/utils_test.py::PortsTest::test_split_port_with_host_port", "tests/unit/utils_test.py::PortsTest::test_split_port_with_protocol", "tests/unit/utils_test.py::ExcludePathsTest::test_directory", "tests/unit/utils_test.py::ExcludePathsTest::test_directory_with_trailing_slash", "tests/unit/utils_test.py::ExcludePathsTest::test_exclude_custom_dockerfile", "tests/unit/utils_test.py::ExcludePathsTest::test_exclude_dockerfile_dockerignore", "tests/unit/utils_test.py::ExcludePathsTest::test_no_dupes", "tests/unit/utils_test.py::ExcludePathsTest::test_no_excludes", "tests/unit/utils_test.py::ExcludePathsTest::test_question_mark", "tests/unit/utils_test.py::ExcludePathsTest::test_single_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_single_filename_trailing_slash", "tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_single_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_single_subdir_wildcard_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_subdirectory", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_exclude", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_end", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_filename_start", 
"tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_single_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_subdir_wildcard_filename", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_exception", "tests/unit/utils_test.py::ExcludePathsTest::test_wildcard_with_wildcard_exception", "tests/unit/utils_test.py::TarTest::test_tar_with_directory_symlinks", "tests/unit/utils_test.py::TarTest::test_tar_with_empty_directory", "tests/unit/utils_test.py::TarTest::test_tar_with_excludes", "tests/unit/utils_test.py::TarTest::test_tar_with_file_symlinks" ]
[]
Apache License 2.0
314
getlogbook__logbook-176
f4d4d9309d0a0ce097cfa52f0f3dad6280d7f2e3
2015-11-26 23:39:36
bb0f4fbeec318a140780b1ac8781599474cf2666
diff --git a/logbook/compat.py b/logbook/compat.py index c3896db..b65ac00 100644 --- a/logbook/compat.py +++ b/logbook/compat.py @@ -9,12 +9,13 @@ :copyright: (c) 2010 by Armin Ronacher, Georg Brandl. :license: BSD, see LICENSE for more details. """ -import sys +import collections import logging +import sys import warnings -import logbook from datetime import date, datetime +import logbook from logbook.helpers import u, string_types, iteritems _epoch_ord = date(1970, 1, 1).toordinal() @@ -63,8 +64,12 @@ class redirected_logging(object): class LoggingCompatRecord(logbook.LogRecord): def _format_message(self, msg, *args, **kwargs): - assert not kwargs - return msg % tuple(args) + if kwargs: + assert not args + return msg % kwargs + else: + assert not kwargs + return msg % tuple(args) class RedirectLoggingHandler(logging.Handler): @@ -124,10 +129,17 @@ class RedirectLoggingHandler(logging.Handler): def convert_record(self, old_record): """Converts an old logging record into a logbook log record.""" + args = old_record.args + kwargs = None + + # Logging allows passing a mapping object, in which case args will be a mapping. + if isinstance(args, collections.Mapping): + kwargs = args + args = None record = LoggingCompatRecord(old_record.name, self.convert_level(old_record.levelno), - old_record.msg, old_record.args, - None, old_record.exc_info, + old_record.msg, args, + kwargs, old_record.exc_info, self.find_extra(old_record), self.find_caller(old_record)) record.time = self.convert_time(old_record.created)
Exception in LoggingCompatRecord for mapping keys Example: ``` logger = logging.getLogger("test") logger.setLevel("DEBUG") logger.addHandler(RedirectLoggingHandler()) with logbook.StderrHandler(): logger.debug("test map %(name)s", {"name": "mapname"}) # raise exception in LoggingCompatRecord: #Traceback (most recent call last): # File "D:\bin\Python34\lib\site-packages\logbook\base.py", line 515, in message # return self._format_message(self.msg, *self.args, **self.kwargs) # File "D:\bin\Python34\lib\site-packages\logbook\compat.py", line 66, in _format_message # return msg % tuple(args) # TypeError: format requires a mapping ``` The quote from "logging/\_\_init\_\_.py" > # # The following statement allows passing of a dictionary as a sole # argument, so that you can do something like # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) # Suggested by Stefan Behnel. # Note that without the test for args[0], we get a problem because # during formatting, we test to see if the arg is present using # 'if self.args:'. If the event being logged is e.g. 'Value is %d' # and if the passed arg fails 'if self.args:' then no formatting # is done. For example, logger.warning('Value is %d', 0) would log # 'Value is %d' instead of 'Value is 0'. # For the use case of passing a dictionary, this should not be a # problem. # Issue #21172: a request was made to relax the isinstance check # to hasattr(args[0], '__getitem__'). However, the docs on string # formatting still seem to suggest a mapping object is required. # Thus, while not removing the isinstance check, it does now look # for collections.Mapping rather than, as before, dict. if (args and len(args) == 1 and isinstance(args[0], collections.Mapping) and args[0]): args = args[0]
getlogbook/logbook
diff --git a/tests/test_logging_compat.py b/tests/test_logging_compat.py index 48dfebe..31fdd40 100644 --- a/tests/test_logging_compat.py +++ b/tests/test_logging_compat.py @@ -36,8 +36,11 @@ def test_basic_compat(request, set_root_logger_level): logger.warn('This is from the old %s', 'system') logger.error('This is from the old system') logger.critical('This is from the old system') + logger.error('This is a %(what)s %(where)s', {'what': 'mapping', 'where': 'test'}) assert ('WARNING: %s: This is from the old system' % name) in captured.getvalue() + assert ('ERROR: %s: This is a mapping test' % + name) in captured.getvalue() if set_root_logger_level: assert handler.records[0].level == logbook.DEBUG else:
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[all]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
async-timeout==5.0.1 Cython==3.0.12 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work execnet==2.1.1 greenlet==3.1.1 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.1.6 -e git+https://github.com/getlogbook/logbook.git@f4d4d9309d0a0ce097cfa52f0f3dad6280d7f2e3#egg=Logbook MarkupSafe==3.0.2 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pyzmq==26.3.0 redis==5.2.1 SQLAlchemy==2.0.40 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions==4.13.0
name: logbook channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - async-timeout==5.0.1 - cython==3.0.12 - execnet==2.1.1 - greenlet==3.1.1 - jinja2==3.1.6 - markupsafe==3.0.2 - pyzmq==26.3.0 - redis==5.2.1 - sqlalchemy==2.0.40 - typing-extensions==4.13.0 prefix: /opt/conda/envs/logbook
[ "tests/test_logging_compat.py::test_basic_compat[True]", "tests/test_logging_compat.py::test_basic_compat[False]" ]
[]
[ "tests/test_logging_compat.py::test_redirect_logbook", "tests/test_logging_compat.py::test_warning_redirections" ]
[]
BSD License
315
mapbox__mapbox-sdk-py-76
ab45a8e1a40b5ecbe9e6c59002883e291856dcc9
2015-11-30 13:29:16
06728ffc30fba83003e9c76645ecec3eec1c63de
diff --git a/.travis.yml b/.travis.yml index d99e41d..e323672 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,6 +15,7 @@ env: install: - pip install -U pip --cache-dir $HOME/.pip-cache - pip install -e .[test] --cache-dir $HOME/.pip-cache + - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install ordereddict; fi script: - py.test --cov mapbox --cov-report term-missing - py.test --doctest-glob='*.md' docs/*.md diff --git a/README.rst b/README.rst index 61fc790..e15a4c9 100644 --- a/README.rst +++ b/README.rst @@ -31,6 +31,11 @@ Services - Forward (place names ⇢ longitude, latitude) - Reverse (longitude, latitude ⇢ place names) +- `Static Maps <https://www.mapbox.com/developers/api/static/>`__ + + - Generate standalone images from existing Mapbox mapids + - Render with GeoJSON overlays + - `Surface <https://www.mapbox.com/developers/api/surface/>`__ - Interpolates values along lines. Useful for elevation traces. @@ -138,6 +143,20 @@ which returns:: See ``import mapbox; help(mapbox.Distance)`` for more detailed usage. +Static Maps +----------- +Static maps are standalone images that can be displayed on web and mobile devices without the aid of a mapping library or API. Static maps can display GeoJSON overlays and the `simplestyle-spec <https://github.com/mapbox/simplestyle-spec>`_ styles will be respected and rendered. + +.. 
code:: python + + from mapbox import Static + res = Static().image('mapbox.satellite', + lon=-61.7, lat=12.1, z=12, + features=list_of_points) + + with open('map.png', 'wb') as output: + output.write(res.content) + Surface ------- diff --git a/mapbox/__init__.py b/mapbox/__init__.py index 5743280..5decf79 100644 --- a/mapbox/__init__.py +++ b/mapbox/__init__.py @@ -7,3 +7,4 @@ from .services.distance import Distance from .services.geocoding import Geocoder, InvalidPlaceTypeError from .services.surface import Surface from .services.uploads import Uploader +from .services.static import Static diff --git a/mapbox/services/static.py b/mapbox/services/static.py new file mode 100644 index 0000000..8e9febe --- /dev/null +++ b/mapbox/services/static.py @@ -0,0 +1,58 @@ +import json + +from uritemplate import URITemplate + +from mapbox.services.base import Service + + +class Static(Service): + + def __init__(self, access_token=None): + self.baseuri = 'https://api.mapbox.com/v4' + self.session = self.get_session(access_token) + + def image(self, mapid, lon=None, lat=None, z=None, features=None, + width=600, height=600, image_format='png256'): + + if lon and lat and z: + auto = False + else: + auto = True + + values = dict( + mapid=mapid, + lon=str(lon), + lat=str(lat), + z=str(z), + width=str(width), + height=str(height), + format=image_format) + + if features: + values['overlay'] = json.dumps({'type': 'FeatureCollection', + 'features': features}) + + if len(values['overlay']) > 4087: # limit is 4096 minus the 'geojson()' + raise ValueError("geojson is too large for the static maps API, " + "must be less than 4096 characters") + + if auto: + uri = URITemplate( + '%s/{mapid}/geojson({overlay})/auto/{width}x{height}.{format}' % + self.baseuri).expand(**values) + else: + uri = URITemplate( + '%s/{mapid}/geojson({overlay})/{lon},{lat},{z}/{width}x{height}.{format}' % + self.baseuri).expand(**values) + else: + if auto: + raise ValueError("Must provide features if lat, lon, z are 
None") + + # No overlay + uri = URITemplate( + '%s/{mapid}/{lon},{lat},{z}/{width}x{height}.{format}' % + self.baseuri).expand(**values) + + res = self.session.get(uri) + self.handle_http_error(res) + return res
Static Map API
mapbox/mapbox-sdk-py
diff --git a/tests/test_staticmaps.py b/tests/test_staticmaps.py new file mode 100644 index 0000000..548ab2f --- /dev/null +++ b/tests/test_staticmaps.py @@ -0,0 +1,99 @@ +import json + +try: + from urllib import quote +except ImportError: + # python 3 + from urllib.parse import quote + +try: + from collections import OrderedDict +except ImportError: + # python 2.6 + from ordereddict import OrderedDict + +import pytest +import responses + +import mapbox + + [email protected] +def points(): + points = [ + OrderedDict( + type="Feature", + properties=OrderedDict(title="point1"), + geometry=OrderedDict( + type="Point", + coordinates=[-61.7, 12.1])), + OrderedDict( + type="Feature", + properties=OrderedDict(title="point2"), + geometry=OrderedDict( + type="Point", + coordinates=[-61.6, 12.0]))] + + return points + + [email protected] +def test_staticmap_lonlatz_only(): + + responses.add( + responses.GET, + 'https://api.mapbox.com/v4/mapbox.satellite/-61.7,12.1,12/600x600.png256?access_token=pk.test', + match_querystring=True, + body='png123', + status=200, + content_type='image/png') + + res = mapbox.Static(access_token='pk.test').image('mapbox.satellite', -61.7, 12.1, 12) + assert res.status_code == 200 + + [email protected] +def test_staticmap_lonlatz_features(points): + + overlay = json.dumps({'type': 'FeatureCollection', + 'features': points}) + overlay = quote(overlay) + url = ('https://api.mapbox.com/v4/mapbox.satellite/geojson({0})/' + '-61.7,12.1,12/600x600.png256?access_token=pk.test'.format(overlay)) + + responses.add( + responses.GET, url, + match_querystring=True, + body='png123', + status=200, + content_type='image/png') + + res = mapbox.Static(access_token='pk.test').image('mapbox.satellite', + -61.7, 12.1, 12, + points) + assert res.status_code == 200 + [email protected] +def test_staticmap_auto_features(points): + + overlay = json.dumps({'type': 'FeatureCollection', + 'features': points}) + overlay = quote(overlay) + url = 
('https://api.mapbox.com/v4/mapbox.satellite/geojson({0})/' + 'auto/600x600.png256?access_token=pk.test'.format(overlay)) + + responses.add( + responses.GET, url, + match_querystring=True, + body='png123', + status=200, + content_type='image/png') + + res = mapbox.Static(access_token='pk.test').image('mapbox.satellite', + features=points) + assert res.status_code == 200 + + +def test_staticmap_auto_nofeatures(points): + with pytest.raises(ValueError): + mapbox.Static(access_token='pk.test').image('mapbox.satellite')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 1 }, "num_modified_files": 3 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
boto3==1.37.23 botocore==1.37.23 cachetools==5.5.2 certifi==2025.1.31 chardet==5.2.0 charset-normalizer==3.4.1 click==8.1.8 click-plugins==1.1.1 cligj==0.7.2 colorama==0.4.6 coverage==7.8.0 coveralls==4.0.1 distlib==0.3.9 docopt==0.6.2 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work filelock==3.18.0 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==1.0.1 -e git+https://github.com/mapbox/mapbox-sdk-py.git@ab45a8e1a40b5ecbe9e6c59002883e291856dcc9#egg=mapbox packaging @ file:///croot/packaging_1734472117206/work platformdirs==4.3.7 pluggy @ file:///croot/pluggy_1733169602837/work pyproject-api==1.9.0 pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 python-dateutil==2.9.0.post0 PyYAML==6.0.2 requests==2.32.3 responses==0.25.7 s3transfer==0.11.4 six==1.17.0 tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 uritemplate==4.1.1 uritemplate.py==3.0.2 urllib3==1.26.20 virtualenv==20.29.3
name: mapbox-sdk-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.37.23 - botocore==1.37.23 - cachetools==5.5.2 - certifi==2025.1.31 - chardet==5.2.0 - charset-normalizer==3.4.1 - click==8.1.8 - click-plugins==1.1.1 - cligj==0.7.2 - colorama==0.4.6 - coverage==7.8.0 - coveralls==4.0.1 - distlib==0.3.9 - docopt==0.6.2 - filelock==3.18.0 - idna==3.10 - jmespath==1.0.1 - platformdirs==4.3.7 - pyproject-api==1.9.0 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - requests==2.32.3 - responses==0.25.7 - s3transfer==0.11.4 - six==1.17.0 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - uritemplate==4.1.1 - uritemplate-py==3.0.2 - urllib3==1.26.20 - virtualenv==20.29.3 prefix: /opt/conda/envs/mapbox-sdk-py
[ "tests/test_staticmaps.py::test_staticmap_lonlatz_only", "tests/test_staticmaps.py::test_staticmap_lonlatz_features", "tests/test_staticmaps.py::test_staticmap_auto_features", "tests/test_staticmaps.py::test_staticmap_auto_nofeatures" ]
[]
[]
[]
MIT License
316
falconry__falcon-664
3a6ce66edb68261f66bd74f2f0f756900da78225
2015-11-30 18:10:25
b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce
diff --git a/doc/api/errors.rst b/doc/api/errors.rst index 26a5960..6cabdfa 100644 --- a/doc/api/errors.rst +++ b/doc/api/errors.rst @@ -36,5 +36,5 @@ Predefined Errors HTTPBadRequest, HTTPUnauthorized, HTTPForbidden, HTTPNotFound, HTTPMethodNotAllowed, HTTPNotAcceptable, HTTPConflict, HTTPLengthRequired, HTTPPreconditionFailed, HTTPUnsupportedMediaType, - HTTPRangeNotSatisfiable, HTTPInternalServerError, HTTPBadGateway, - HTTPServiceUnavailable + HTTPRangeNotSatisfiable, HTTPUnprocessableEntity, HTTPInternalServerError, + HTTPBadGateway, HTTPServiceUnavailable diff --git a/doc/api/status.rst b/doc/api/status.rst index 9e72add..0aa6c68 100644 --- a/doc/api/status.rst +++ b/doc/api/status.rst @@ -95,6 +95,7 @@ string objects that must be created when preparing responses. HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416 HTTP_EXPECTATION_FAILED = HTTP_417 HTTP_IM_A_TEAPOT = HTTP_418 + HTTP_UNPROCESSABLE_ENTITY = HTTP_422 HTTP_UPGRADE_REQUIRED = HTTP_426 HTTP_PRECONDITION_REQUIRED = HTTP_428 HTTP_TOO_MANY_REQUESTS = HTTP_429 @@ -119,6 +120,7 @@ string objects that must be created when preparing responses. HTTP_416 = '416 Range Not Satisfiable' HTTP_417 = '417 Expectation Failed' HTTP_418 = "418 I'm a teapot" + HTTP_422 = "422 Unprocessable Entity" HTTP_426 = '426 Upgrade Required' HTTP_428 = '428 Precondition Required' HTTP_429 = '429 Too Many Requests' diff --git a/falcon/errors.py b/falcon/errors.py index 49f2747..7f75b3e 100644 --- a/falcon/errors.py +++ b/falcon/errors.py @@ -306,6 +306,24 @@ class HTTPRangeNotSatisfiable(NoRepresentation, HTTPError): headers=headers) +class HTTPUnprocessableEntity(HTTPError): + """422 Unprocessable Entity. + + The request was well-formed but was unable to be followed due to semantic + errors. See also: http://www.ietf.org/rfc/rfc4918. + + Args: + title (str): Error title (e.g., 'Missing title field'). + description (str): Human-friendly description of the error, along with + a helpful suggestion or two. 
+ kwargs (optional): Same as for ``HTTPError``. + """ + + def __init__(self, title, description, **kwargs): + super(HTTPUnprocessableEntity, self).__init__(status.HTTP_422, title, + description, **kwargs) + + class HTTPInternalServerError(HTTPError): """500 Internal Server Error. diff --git a/falcon/status_codes.py b/falcon/status_codes.py index f0c87a4..c27c602 100644 --- a/falcon/status_codes.py +++ b/falcon/status_codes.py @@ -90,6 +90,8 @@ HTTP_417 = '417 Expectation Failed' HTTP_EXPECTATION_FAILED = HTTP_417 HTTP_418 = "418 I'm a teapot" HTTP_IM_A_TEAPOT = HTTP_418 +HTTP_422 = "422 Unprocessable Entity" +HTTP_UNPROCESSABLE_ENTITY = HTTP_422 HTTP_426 = '426 Upgrade Required' HTTP_UPGRADE_REQUIRED = HTTP_426 HTTP_428 = '428 Precondition Required'
Adding HTTP_422, Unprocessable Entity I guess we need such response support.
falconry/falcon
diff --git a/tests/test_httperror.py b/tests/test_httperror.py index 1a99ccb..e48d4b0 100644 --- a/tests/test_httperror.py +++ b/tests/test_httperror.py @@ -676,5 +676,6 @@ class TestHTTPError(testing.TestBase): self._misc_test(falcon.HTTPPreconditionFailed, falcon.HTTP_412) self._misc_test(falcon.HTTPUnsupportedMediaType, falcon.HTTP_415, needs_title=False) + self._misc_test(falcon.HTTPUnprocessableEntity, falcon.HTTP_422) self._misc_test(falcon.HTTPInternalServerError, falcon.HTTP_500) self._misc_test(falcon.HTTPBadGateway, falcon.HTTP_502)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 4 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "coverage", "ddt", "pyyaml", "requests", "testtools", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "tools/test-requires" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 ddt==1.7.2 exceptiongroup==1.2.2 -e git+https://github.com/falconry/falcon.git@3a6ce66edb68261f66bd74f2f0f756900da78225#egg=falcon idna==3.10 iniconfig==2.1.0 nose==1.3.7 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 python-mimeparse==2.0.0 PyYAML==6.0.2 requests==2.32.3 six==1.17.0 testtools==2.7.2 tomli==2.2.1 urllib3==2.3.0
name: falcon channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - ddt==1.7.2 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - nose==1.3.7 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - python-mimeparse==2.0.0 - pyyaml==6.0.2 - requests==2.32.3 - six==1.17.0 - testtools==2.7.2 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/falcon
[ "tests/test_httperror.py::TestHTTPError::test_misc" ]
[]
[ "tests/test_httperror.py::TestHTTPError::test_401", "tests/test_httperror.py::TestHTTPError::test_404_with_body", "tests/test_httperror.py::TestHTTPError::test_404_without_body", "tests/test_httperror.py::TestHTTPError::test_405_with_body", "tests/test_httperror.py::TestHTTPError::test_405_without_body", "tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers", "tests/test_httperror.py::TestHTTPError::test_405_without_body_with_extra_headers_double_check", "tests/test_httperror.py::TestHTTPError::test_411", "tests/test_httperror.py::TestHTTPError::test_413", "tests/test_httperror.py::TestHTTPError::test_416", "tests/test_httperror.py::TestHTTPError::test_503_datetime_retry_after", "tests/test_httperror.py::TestHTTPError::test_503_integer_retry_after", "tests/test_httperror.py::TestHTTPError::test_base_class", "tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_anything", "tests/test_httperror.py::TestHTTPError::test_client_does_not_accept_json_or_xml", "tests/test_httperror.py::TestHTTPError::test_custom_error_serializer", "tests/test_httperror.py::TestHTTPError::test_epic_fail_json", "tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_1_text_xml", "tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_2_application_xml", "tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_3_application_vnd_company_system_project_resource_xml_v_1_1", "tests/test_httperror.py::TestHTTPError::test_epic_fail_xml_4_application_atom_xml", "tests/test_httperror.py::TestHTTPError::test_forbidden_1_application_json", "tests/test_httperror.py::TestHTTPError::test_forbidden_2_application_vnd_company_system_project_resource_json_v_1_1", "tests/test_httperror.py::TestHTTPError::test_forbidden_3_application_json_patch_json", "tests/test_httperror.py::TestHTTPError::test_invalid_header", "tests/test_httperror.py::TestHTTPError::test_invalid_param", "tests/test_httperror.py::TestHTTPError::test_missing_header", 
"tests/test_httperror.py::TestHTTPError::test_missing_param", "tests/test_httperror.py::TestHTTPError::test_no_description_json", "tests/test_httperror.py::TestHTTPError::test_no_description_xml", "tests/test_httperror.py::TestHTTPError::test_temporary_413_datetime_retry_after", "tests/test_httperror.py::TestHTTPError::test_temporary_413_integer_retry_after", "tests/test_httperror.py::TestHTTPError::test_unicode_json", "tests/test_httperror.py::TestHTTPError::test_unicode_xml" ]
[]
Apache License 2.0
317
juju-solutions__charms.benchmark-3
df2acf8736cce39d905990fd5008d6afa57863c3
2015-12-01 12:31:43
df2acf8736cce39d905990fd5008d6afa57863c3
tvansteenburgh: Unrelated to your change, but I don't think the `in_relation_hook()` guard on __init__.py:133 is correct. Benchmark.start() is usually called in an action, not a relation, and we want the action_uuid to be set on the relation regardless of when Benchmark.start() is called.
diff --git a/charms/benchmark/__init__.py b/charms/benchmark/__init__.py index 9a5458e..f0c15c5 100644 --- a/charms/benchmark/__init__.py +++ b/charms/benchmark/__init__.py @@ -130,14 +130,14 @@ class Benchmark(object): charm_dir = os.environ.get('CHARM_DIR') action_uuid = os.environ.get('JUJU_ACTION_UUID') - if in_relation_hook() and charm_dir and action_uuid: + if charm_dir and action_uuid: """ If the cabs-collector charm is installed, take a snapshot of the current profile data. """ # Do profile data collection immediately on this unit if os.path.exists(COLLECT_PROFILE_DATA): - subprocess.check_output([COLLECT_PROFILE_DATA]) + subprocess.check_output([COLLECT_PROFILE_DATA, action_uuid]) with open( os.path.join(
Action UUID needs to be set explicitly to the collect-profile-data script
juju-solutions/charms.benchmark
diff --git a/tests/test_charms-benchmark.py b/tests/test_charms-benchmark.py index 3b78034..8528993 100644 --- a/tests/test_charms-benchmark.py +++ b/tests/test_charms-benchmark.py @@ -146,7 +146,7 @@ class TestBenchmark(TestCase): COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' exists.assert_any_call(COLLECT_PROFILE_DATA) - check_output.assert_any_call([COLLECT_PROFILE_DATA]) + check_output.assert_any_call([COLLECT_PROFILE_DATA, 'my_action']) @mock.patch('charms.benchmark.action_set') def test_benchmark_finish(self, action_set):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 1 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "coverage", "testtools", "pep8", "mock", "cherrypy", "pyyaml", "six", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "test-requires.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
autocommand==2.2.2 backports.tarfile==1.2.0 charmhelpers==1.2.1 -e git+https://github.com/juju-solutions/charms.benchmark.git@df2acf8736cce39d905990fd5008d6afa57863c3#egg=charms.benchmark cheroot==10.0.1 CherryPy==18.10.0 coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 jaraco.collections==5.1.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jaraco.text==4.0.0 Jinja2==3.1.6 MarkupSafe==3.0.2 mock==5.2.0 more-itertools==10.6.0 netaddr==1.3.0 nose==1.3.7 packaging==24.2 pbr==6.1.1 pep8==1.7.1 pluggy==1.5.0 portend==3.2.0 pytest==8.3.5 python-dateutil==2.9.0.post0 PyYAML==6.0.2 six==1.17.0 tempora==5.8.0 testtools==2.7.2 tomli==2.2.1 zc.lockfile==3.0.post1
name: charms.benchmark channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - autocommand==2.2.2 - backports-tarfile==1.2.0 - charmhelpers==1.2.1 - cheroot==10.0.1 - cherrypy==18.10.0 - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - jaraco-collections==5.1.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jaraco-text==4.0.0 - jinja2==3.1.6 - markupsafe==3.0.2 - mock==5.2.0 - more-itertools==10.6.0 - netaddr==1.3.0 - nose==1.3.7 - packaging==24.2 - pbr==6.1.1 - pep8==1.7.1 - pluggy==1.5.0 - portend==3.2.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pyyaml==6.0.2 - six==1.17.0 - tempora==5.8.0 - testtools==2.7.2 - tomli==2.2.1 - zc-lockfile==3.0.post1 prefix: /opt/conda/envs/charms.benchmark
[ "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_start" ]
[]
[ "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_finish", "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_finish_oserror", "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_init", "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_meta", "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_set_composite_score", "tests/test_charms-benchmark.py::TestBenchmark::test_benchmark_start_oserror", "tests/test_charms-benchmark.py::TestBenchmark::test_set_data" ]
[]
null
318
pre-commit__pre-commit-310
6b005cff0d5d4f579be5dbb97102c4fee3b4e39f
2015-12-01 16:34:13
c1c3f3b571adcd0cf5a8cea7d9d80574c2572c02
diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py index c8d2bfc..60038f4 100644 --- a/pre_commit/error_handler.py +++ b/pre_commit/error_handler.py @@ -7,7 +7,9 @@ import io import os.path import traceback +from pre_commit import five from pre_commit.errors import FatalError +from pre_commit.output import sys_stdout_write_wrapper from pre_commit.store import Store @@ -16,15 +18,15 @@ class PreCommitSystemExit(SystemExit): pass -def _log_and_exit(msg, exc, formatted, print_fn=print): - error_msg = '{0}: {1}: {2}'.format(msg, type(exc).__name__, exc) - print_fn(error_msg) - print_fn('Check the log at ~/.pre-commit/pre-commit.log') +def _log_and_exit(msg, exc, formatted, write_fn=sys_stdout_write_wrapper): + error_msg = '{0}: {1}: {2}\n'.format(msg, type(exc).__name__, exc) + write_fn(error_msg) + write_fn('Check the log at ~/.pre-commit/pre-commit.log\n') store = Store() store.require_created() - with io.open(os.path.join(store.directory, 'pre-commit.log'), 'w') as log: - log.write(error_msg + '\n') - log.write(formatted + '\n') + with io.open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log: + log.write(five.to_bytes(error_msg)) + log.write(five.to_bytes(formatted) + b'\n') raise PreCommitSystemExit(1)
Non-ascii prints in error handler without tty cause stacktrace ``` 23:00:13 style runtests: commands[0] | pre-commit run --all-files 23:00:13 [INFO] Installing environment for [email protected]:mirrors/pre-commit/mirrors-jshint. 23:00:13 [INFO] Once installed this environment will be reused. 23:00:13 [INFO] This may take a few minutes... 23:01:33 Traceback (most recent call last): 23:01:33 File ".tox/style/bin/pre-commit", line 11, in <module> 23:01:33 sys.exit(main()) 23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/main.py", line 157, in main 23:01:33 'Command {0} failed to exit with a returncode'.format(args.command) 23:01:33 File "/usr/lib64/python2.7/contextlib.py", line 35, in __exit__ 23:01:33 self.gen.throw(type, value, traceback) 23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 41, in error_handler 23:01:33 traceback.format_exc(), 23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 21, in _log_and_exit 23:01:33 print_fn(error_msg) 23:01:33 UnicodeEncodeError: 'ascii' codec can't encode characters in position 735-737: ordinal not in range(128) ```
pre-commit/pre-commit
diff --git a/tests/error_handler_test.py b/tests/error_handler_test.py index 161b88f..d8f966a 100644 --- a/tests/error_handler_test.py +++ b/tests/error_handler_test.py @@ -1,15 +1,18 @@ +# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import unicode_literals import io import os.path import re +import sys import mock import pytest from pre_commit import error_handler from pre_commit.errors import FatalError +from pre_commit.util import cmd_output @pytest.yield_fixture @@ -72,17 +75,17 @@ def test_error_handler_uncaught_error(mocked_log_and_exit): def test_log_and_exit(mock_out_store_directory): - mocked_print = mock.Mock() + mocked_write = mock.Mock() with pytest.raises(error_handler.PreCommitSystemExit): error_handler._log_and_exit( 'msg', FatalError('hai'), "I'm a stacktrace", - print_fn=mocked_print, + write_fn=mocked_write, ) - printed = '\n'.join(call[0][0] for call in mocked_print.call_args_list) + printed = ''.join(call[0][0] for call in mocked_write.call_args_list) assert printed == ( 'msg: FatalError: hai\n' - 'Check the log at ~/.pre-commit/pre-commit.log' + 'Check the log at ~/.pre-commit/pre-commit.log\n' ) log_file = os.path.join(mock_out_store_directory, 'pre-commit.log') @@ -92,3 +95,25 @@ def test_log_and_exit(mock_out_store_directory): 'msg: FatalError: hai\n' "I'm a stacktrace\n" ) + + +def test_error_handler_non_ascii_exception(mock_out_store_directory): + with pytest.raises(error_handler.PreCommitSystemExit): + with error_handler.error_handler(): + raise ValueError('☃') + + +def test_error_handler_no_tty(tempdir_factory): + output = cmd_output( + sys.executable, '-c', + 'from __future__ import unicode_literals\n' + 'from pre_commit.error_handler import error_handler\n' + 'with error_handler():\n' + ' raise ValueError("\\u2603")\n', + env=dict(os.environ, PRE_COMMIT_HOME=tempdir_factory.get()), + retcode=1, + ) + assert output[1].replace('\r', '') == ( + 'An unexpected error has occurred: ValueError: ☃\n' + 'Check 
the log at ~/.pre-commit/pre-commit.log\n' + )
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
aspy.yaml==1.3.0 astroid==1.3.2 attrs==22.2.0 cached-property==1.5.2 certifi==2021.5.30 coverage==6.2 distlib==0.3.9 filelock==3.4.1 flake8==5.0.4 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 jsonschema==3.2.0 logilab-common==1.9.7 mccabe==0.7.0 mock==5.2.0 mypy-extensions==1.0.0 nodeenv==1.6.0 ordereddict==1.1 packaging==21.3 platformdirs==2.4.0 pluggy==1.0.0 -e git+https://github.com/pre-commit/pre-commit.git@6b005cff0d5d4f579be5dbb97102c4fee3b4e39f#egg=pre_commit py==1.11.0 pycodestyle==2.9.1 pyflakes==2.5.0 pylint==1.3.1 pyparsing==3.1.4 pyrsistent==0.18.0 pytest==7.0.1 PyYAML==6.0.1 simplejson==3.20.1 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 virtualenv==20.17.1 zipp==3.6.0
name: pre-commit channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argparse==1.4.0 - aspy-yaml==1.3.0 - astroid==1.3.2 - attrs==22.2.0 - cached-property==1.5.2 - coverage==6.2 - distlib==0.3.9 - filelock==3.4.1 - flake8==5.0.4 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - jsonschema==3.2.0 - logilab-common==1.9.7 - mccabe==0.7.0 - mock==5.2.0 - mypy-extensions==1.0.0 - nodeenv==1.6.0 - ordereddict==1.1 - packaging==21.3 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycodestyle==2.9.1 - pyflakes==2.5.0 - pylint==1.3.1 - pyparsing==3.1.4 - pyrsistent==0.18.0 - pytest==7.0.1 - pyyaml==6.0.1 - simplejson==3.20.1 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - virtualenv==20.17.1 - zipp==3.6.0 prefix: /opt/conda/envs/pre-commit
[ "tests/error_handler_test.py::test_log_and_exit" ]
[]
[ "tests/error_handler_test.py::test_error_handler_no_exception", "tests/error_handler_test.py::test_error_handler_fatal_error", "tests/error_handler_test.py::test_error_handler_uncaught_error", "tests/error_handler_test.py::test_error_handler_non_ascii_exception", "tests/error_handler_test.py::test_error_handler_no_tty" ]
[]
MIT License
319
getlogbook__logbook-183
1d999a784d0d8f5f7423f25c684cc1100843ccc5
2015-12-03 01:44:29
bb0f4fbeec318a140780b1ac8781599474cf2666
diff --git a/logbook/handlers.py b/logbook/handlers.py index 82e518f..5f66978 100644 --- a/logbook/handlers.py +++ b/logbook/handlers.py @@ -20,6 +20,7 @@ try: except ImportError: from sha import new as sha1 import traceback +import collections from datetime import datetime, timedelta from collections import deque from textwrap import dedent @@ -1014,14 +1015,42 @@ class MailHandler(Handler, StringFormatterHandlerMixin, The default timedelta is 60 seconds (one minute). - The mail handler is sending mails in a blocking manner. If you are not + The mail handler sends mails in a blocking manner. If you are not using some centralized system for logging these messages (with the help of ZeroMQ or others) and the logging system slows you down you can wrap the handler in a :class:`logbook.queues.ThreadedWrapperHandler` that will then send the mails in a background thread. + `server_addr` can be a tuple of host and port, or just a string containing + the host to use the default port (25, or 465 if connecting securely.) + + `credentials` can be a tuple or dictionary of arguments that will be passed + to :py:meth:`smtplib.SMTP.login`. + + `secure` can be a tuple, dictionary, or boolean. As a boolean, this will + simply enable or disable a secure connection. The tuple is unpacked as + parameters `keyfile`, `certfile`. As a dictionary, `secure` should contain + those keys. For backwards compatibility, ``secure=()`` will enable a secure + connection. If `starttls` is enabled (default), these parameters will be + passed to :py:meth:`smtplib.SMTP.starttls`, otherwise + :py:class:`smtplib.SMTP_SSL`. + + .. versionchanged:: 0.3 The handler supports the batching system now. + + .. versionadded:: 1.0 + `starttls` parameter added to allow disabling STARTTLS for SSL + connections. + + .. versionchanged:: 1.0 + If `server_addr` is a string, the default port will be used. + + .. versionchanged:: 1.0 + `credentials` parameter can now be a dictionary of keyword arguments. + + .. 
versionchanged:: 1.0 + `secure` can now be a dictionary or boolean in addition to to a tuple. """ default_format_string = MAIL_FORMAT_STRING default_related_format_string = MAIL_RELATED_FORMAT_STRING @@ -1039,7 +1068,7 @@ class MailHandler(Handler, StringFormatterHandlerMixin, server_addr=None, credentials=None, secure=None, record_limit=None, record_delta=None, level=NOTSET, format_string=None, related_format_string=None, - filter=None, bubble=False): + filter=None, bubble=False, starttls=True): Handler.__init__(self, level, filter, bubble) StringFormatterHandlerMixin.__init__(self, format_string) LimitingHandlerMixin.__init__(self, record_limit, record_delta) @@ -1054,6 +1083,7 @@ class MailHandler(Handler, StringFormatterHandlerMixin, if related_format_string is None: related_format_string = self.default_related_format_string self.related_format_string = related_format_string + self.starttls = starttls def _get_related_format_string(self): if isinstance(self.related_formatter, StringFormatter): @@ -1148,20 +1178,63 @@ class MailHandler(Handler, StringFormatterHandlerMixin, """Returns an SMTP connection. By default it reconnects for each sent mail. """ - from smtplib import SMTP, SMTP_PORT, SMTP_SSL_PORT + from smtplib import SMTP, SMTP_SSL, SMTP_PORT, SMTP_SSL_PORT if self.server_addr is None: host = '127.0.0.1' port = self.secure and SMTP_SSL_PORT or SMTP_PORT else: - host, port = self.server_addr - con = SMTP() - con.connect(host, port) + try: + host, port = self.server_addr + except ValueError: + # If server_addr is a string, the tuple unpacking will raise + # ValueError, and we can use the default port. + host = self.server_addr + port = self.secure and SMTP_SSL_PORT or SMTP_PORT + + # Previously, self.secure was passed as con.starttls(*self.secure). This + # meant that starttls couldn't be used without a keyfile and certfile + # unless an empty tuple was passed. See issue #94. 
+ # + # The changes below allow passing: + # - secure=True for secure connection without checking identity. + # - dictionary with keys 'keyfile' and 'certfile'. + # - tuple to be unpacked to variables keyfile and certfile. + # - secure=() equivalent to secure=True for backwards compatibility. + # - secure=False equivalent to secure=None to disable. + if isinstance(self.secure, collections.Mapping): + keyfile = self.secure.get('keyfile', None) + certfile = self.secure.get('certfile', None) + elif isinstance(self.secure, collections.Iterable): + # Allow empty tuple for backwards compatibility + if len(self.secure) == 0: + keyfile = certfile = None + else: + keyfile, certfile = self.secure + else: + keyfile = certfile = None + + # Allow starttls to be disabled by passing starttls=False. + if not self.starttls and self.secure: + con = SMTP_SSL(host, port, keyfile=keyfile, certfile=certfile) + else: + con = SMTP(host, port) + if self.credentials is not None: - if self.secure is not None: + secure = self.secure + if self.starttls and secure is not None and secure is not False: con.ehlo() - con.starttls(*self.secure) + con.starttls(keyfile=keyfile, certfile=certfile) con.ehlo() - con.login(*self.credentials) + + # Allow credentials to be a tuple or dict. 
+ if isinstance(self.credentials, collections.Mapping): + credentials_args = () + credentials_kwargs = self.credentials + else: + credentials_args = self.credentials + credentials_kwargs = dict() + + con.login(*credentials_args, **credentials_kwargs) return con def close_connection(self, con): @@ -1175,7 +1248,7 @@ class MailHandler(Handler, StringFormatterHandlerMixin, pass def deliver(self, msg, recipients): - """Delivers the given message to a list of recpients.""" + """Delivers the given message to a list of recipients.""" con = self.get_connection() try: con.sendmail(self.from_addr, recipients, msg.as_string()) @@ -1227,7 +1300,7 @@ class GMailHandler(MailHandler): def __init__(self, account_id, password, recipients, **kw): super(GMailHandler, self).__init__( - account_id, recipients, secure=(), + account_id, recipients, secure=True, server_addr=("smtp.gmail.com", 587), credentials=(account_id, password), **kw) diff --git a/setup.py b/setup.py index 26df542..bdb9b00 100644 --- a/setup.py +++ b/setup.py @@ -158,6 +158,10 @@ with open(version_file_path) as version_file: extras_require = dict() extras_require['test'] = set(['pytest', 'pytest-cov']) + +if sys.version_info[:2] < (3, 3): + extras_require['test'] |= set(['mock']) + extras_require['dev'] = set(['cython']) | extras_require['test'] extras_require['execnet'] = set(['execnet>=1.0.9'])
SMTP Handler STARTTLS Due to the lack of documentation on this handler it took a little digging to work out how to get it to work... One thing that confused me was the "secure" argument. Python SMTPLib starttls() accepts two optional values: a keyfile and certfile - but these are only required for *checking* the identity. If neither are specified then SMTPLib will still try establish an encrypted connection but without checking the identity. If you do not specify an argument to Logbook, it will not attempt to establish an encrypted connection at all. So, if you want a tls connection to the SMTP server but don't care about checking the identity you can do `secure = []` which will pass the `if self.secure is not None`, however if you do `secure = True` you will get an error because you cannot unpack a boolean! (as logbook populates the arguments using: `conn.starttls(*self.secure)`). It'd help if the documentation explained the arguments for the mail handlers.
getlogbook/logbook
diff --git a/tests/test_mail_handler.py b/tests/test_mail_handler.py index babc4e2..fd7730b 100644 --- a/tests/test_mail_handler.py +++ b/tests/test_mail_handler.py @@ -7,6 +7,11 @@ from logbook.helpers import u from .utils import capturing_stderr_context, make_fake_mail_handler +try: + from unittest.mock import Mock, call, patch +except ImportError: + from mock import Mock, call, patch + __file_without_pyc__ = __file__ if __file_without_pyc__.endswith('.pyc'): __file_without_pyc__ = __file_without_pyc__[:-1] @@ -104,3 +109,126 @@ def test_group_handler_mail_combo(activation_strategy, logger): assert len(related) == 2 assert re.search('Message type:\s+WARNING', related[0]) assert re.search('Message type:\s+DEBUG', related[1]) + + +def test_mail_handler_arguments(): + with patch('smtplib.SMTP', autospec=True) as mock_smtp: + + # Test the mail handler with supported arguments before changes to + # secure, credentials, and starttls + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=('keyfile', 'certfile')) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[1] == call().starttls( + keyfile='keyfile', certfile='certfile') + assert mock_smtp.method_calls[3] == call().login('username', 'password') + + # Test secure=() + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=()) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[5] == call().starttls( + certfile=None, keyfile=None) + assert mock_smtp.method_calls[7] == call().login('username', 'password') + + # Test implicit port with string server_addr, dictionary credentials, + # 
dictionary secure. + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr='server.example.com', + credentials={'user': 'username', 'password': 'password'}, + secure={'certfile': 'certfile2', 'keyfile': 'keyfile2'}) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[9] == call().starttls( + certfile='certfile2', keyfile='keyfile2') + assert mock_smtp.method_calls[11] == call().login( + user='username', password='password') + + # Test secure=True + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=True) + + mail_handler.get_connection() + + assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[13] == call().starttls( + certfile=None, keyfile=None) + assert mock_smtp.method_calls[15] == call().login('username', 'password') + assert len(mock_smtp.method_calls) == 16 + + # Test secure=False + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr=('server.example.com', 465), + credentials=('username', 'password'), + secure=False) + + mail_handler.get_connection() + + # starttls not called because we check len of method_calls before and + # after this test. 
+ assert mock_smtp.call_args == call('server.example.com', 465) + assert mock_smtp.method_calls[16] == call().login('username', 'password') + assert len(mock_smtp.method_calls) == 17 + + with patch('smtplib.SMTP_SSL', autospec=True) as mock_smtp_ssl: + # Test starttls=False + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr='server.example.com', + credentials={'user': 'username', 'password': 'password'}, + secure={'certfile': 'certfile', 'keyfile': 'keyfile'}, + starttls=False) + + mail_handler.get_connection() + + assert mock_smtp_ssl.call_args == call( + 'server.example.com', 465, keyfile='keyfile', certfile='certfile') + assert mock_smtp_ssl.method_calls[0] == call().login( + user='username', password='password') + + # Test starttls=False with secure=True + mail_handler = logbook.MailHandler( + from_addr='[email protected]', + recipients='[email protected]', + server_addr='server.example.com', + credentials={'user': 'username', 'password': 'password'}, + secure=True, + starttls=False) + + mail_handler.get_connection() + + assert mock_smtp_ssl.call_args == call( + 'server.example.com', 465, keyfile=None, certfile=None) + assert mock_smtp_ssl.method_calls[1] == call().login( + user='username', password='password') + + + + + +
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 2 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "Cython" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 Cython==3.0.12 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/getlogbook/logbook.git@1d999a784d0d8f5f7423f25c684cc1100843ccc5#egg=Logbook packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work pytest-cov==6.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: logbook channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - cython==3.0.12 - pytest-cov==6.0.0 prefix: /opt/conda/envs/logbook
[ "tests/test_mail_handler.py::test_mail_handler_arguments" ]
[]
[ "tests/test_mail_handler.py::test_mail_handler[ContextEnteringStrategy]", "tests/test_mail_handler.py::test_mail_handler[PushingStrategy]", "tests/test_mail_handler.py::test_mail_handler_batching[ContextEnteringStrategy]", "tests/test_mail_handler.py::test_mail_handler_batching[PushingStrategy]", "tests/test_mail_handler.py::test_group_handler_mail_combo[ContextEnteringStrategy]", "tests/test_mail_handler.py::test_group_handler_mail_combo[PushingStrategy]" ]
[]
BSD License
320
projectmesa__mesa-178
57a0beb5947fc16b7b665f297504907e300b043c
2015-12-04 04:39:32
6db9efde7c659b9338fc8cf551f066cdba7031c3
diff --git a/mesa/space.py b/mesa/space.py index 5e3a9544..77fe8174 100644 --- a/mesa/space.py +++ b/mesa/space.py @@ -24,6 +24,20 @@ X = 0 Y = 1 +def accept_tuple_argument(wrapped_function): + ''' + Decorator to allow grid methods that take a list of (x, y) position tuples + to also handle a single position, by automatically wrapping tuple in + single-item list rather than forcing user to do it. + ''' + def wrapper(*args): + if isinstance(args[1], tuple) and len(args[1]) == 2: + return wrapped_function(args[0], [args[1]]) + else: + return wrapped_function(*args) + return wrapper + + class Grid(object): ''' Base class for a square grid. @@ -238,10 +252,11 @@ class Grid(object): x, y = pos return x < 0 or x >= self.width or y < 0 or y >= self.height + @accept_tuple_argument def iter_cell_list_contents(self, cell_list): ''' Args: - cell_list: Array-like of (x, y) tuples + cell_list: Array-like of (x, y) tuples, or single tuple. Returns: A iterator of the contents of the cells identified in cell_list @@ -249,10 +264,11 @@ class Grid(object): return ( self[y][x] for x, y in cell_list if not self.is_cell_empty((x, y))) + @accept_tuple_argument def get_cell_list_contents(self, cell_list): ''' Args: - cell_list: Array-like of (x, y) tuples + cell_list: Array-like of (x, y) tuples, or single tuple. Returns: A list of the contents of the cells identified in cell_list @@ -418,10 +434,11 @@ class MultiGrid(Grid): x, y = pos self.grid[y][x].remove(agent) + @accept_tuple_argument def iter_cell_list_contents(self, cell_list): ''' Args: - cell_list: Array-like of (x, y) tuples + cell_list: Array-like of (x, y) tuples, or single tuple. Returns: A iterator of the contents of the cells identified in cell_list
baby patch/tweak: allow cell methods to take single cell Cf. discussion in #176 -- taking on the small "(TODO: someone should probably fix that...)" from the tutorial re: grid's `get_cell_list_contents` requiring a list of cells even if it's just being passed a single cell. It's a very easy fix, I'm on the case. :-)
projectmesa/mesa
diff --git a/tests/test_grid.py b/tests/test_grid.py index b558f4d9..c09f0496 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -53,6 +53,43 @@ class TestBaseGrid(unittest.TestCase): x, y = agent.pos assert self.grid[y][x] == agent + def test_cell_agent_reporting(self): + ''' + Ensure that if an agent is in a cell, get_cell_list_contents accurately + reports that fact. + ''' + for agent in self.agents: + x, y = agent.pos + assert agent in self.grid.get_cell_list_contents([(x, y)]) + + def test_listfree_cell_agent_reporting(self): + ''' + Ensure that if an agent is in a cell, get_cell_list_contents accurately + reports that fact, even when single position is not wrapped in a list. + ''' + for agent in self.agents: + x, y = agent.pos + assert agent in self.grid.get_cell_list_contents((x, y)) + + def test_iter_cell_agent_reporting(self): + ''' + Ensure that if an agent is in a cell, iter_cell_list_contents + accurately reports that fact. + ''' + for agent in self.agents: + x, y = agent.pos + assert agent in self.grid.iter_cell_list_contents([(x, y)]) + + def test_listfree_iter_cell_agent_reporting(self): + ''' + Ensure that if an agent is in a cell, iter_cell_list_contents + accurately reports that fact, even when single position is not + wrapped in a list. + ''' + for agent in self.agents: + x, y = agent.pos + assert agent in self.grid.iter_cell_list_contents((x, y)) + def test_neighbors(self): ''' Test the base neighborhood methods on the non-toroid.
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
0.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup==1.2.2 flake8==7.2.0 iniconfig==2.1.0 mccabe==0.7.0 -e git+https://github.com/projectmesa/mesa.git@57a0beb5947fc16b7b665f297504907e300b043c#egg=Mesa numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 pytz==2025.2 six==1.17.0 tomli==2.2.1 tornado==6.4.2 tzdata==2025.2
name: mesa channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - flake8==7.2.0 - iniconfig==2.1.0 - mccabe==0.7.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - pytz==2025.2 - six==1.17.0 - tomli==2.2.1 - tornado==6.4.2 - tzdata==2025.2 prefix: /opt/conda/envs/mesa
[ "tests/test_grid.py::TestBaseGrid::test_listfree_cell_agent_reporting", "tests/test_grid.py::TestBaseGrid::test_listfree_iter_cell_agent_reporting", "tests/test_grid.py::TestBaseGridTorus::test_listfree_cell_agent_reporting", "tests/test_grid.py::TestBaseGridTorus::test_listfree_iter_cell_agent_reporting" ]
[]
[ "tests/test_grid.py::TestBaseGrid::test_agent_positions", "tests/test_grid.py::TestBaseGrid::test_cell_agent_reporting", "tests/test_grid.py::TestBaseGrid::test_coord_iter", "tests/test_grid.py::TestBaseGrid::test_iter_cell_agent_reporting", "tests/test_grid.py::TestBaseGrid::test_neighbors", "tests/test_grid.py::TestBaseGridTorus::test_agent_positions", "tests/test_grid.py::TestBaseGridTorus::test_cell_agent_reporting", "tests/test_grid.py::TestBaseGridTorus::test_coord_iter", "tests/test_grid.py::TestBaseGridTorus::test_iter_cell_agent_reporting", "tests/test_grid.py::TestBaseGridTorus::test_neighbors", "tests/test_grid.py::TestSingleGrid::test_enforcement", "tests/test_grid.py::TestMultiGrid::test_agent_positions", "tests/test_grid.py::TestMultiGrid::test_neighbors" ]
[]
Apache License 2.0
321
mapbox__mapbox-sdk-py-85
06728ffc30fba83003e9c76645ecec3eec1c63de
2015-12-04 14:13:44
06728ffc30fba83003e9c76645ecec3eec1c63de
diff --git a/mapbox/services/base.py b/mapbox/services/base.py index 4a25402..efd0eeb 100644 --- a/mapbox/services/base.py +++ b/mapbox/services/base.py @@ -1,6 +1,8 @@ """Base Service class""" import os +import base64 +import json import requests @@ -27,6 +29,20 @@ class Service: """A product token for use in User-Agent headers.""" return 'mapbox-sdk-py/{0}'.format(__version__) + @property + def username(self): + """Get username from access token + Token contains base64 encoded json object with username""" + token = self.session.params['access_token'] + if not token: + raise ValueError("session does not have a valid access_token param") + data = token.split('.')[1] + data = data.replace('-', '+').replace('_', '/') + try: + return json.loads(base64.b64decode(data).decode('utf-8'))['u'] + except (ValueError, KeyError): + raise ValueError("access_token does not contain username") + def handle_http_error(self, response, custom_messages=None, raise_for_status=False): if not custom_messages: diff --git a/mapbox/services/uploads.py b/mapbox/services/uploads.py index 47e2451..635a53e 100644 --- a/mapbox/services/uploads.py +++ b/mapbox/services/uploads.py @@ -11,7 +11,7 @@ class Uploader(Service): from mapbox import Uploader - u = Uploader('username') + u = Uploader() url = u.stage('test.tif') job = u.create(url, 'test1').json() @@ -24,8 +24,7 @@ class Uploader(Service): assert job not in u.list().json() """ - def __init__(self, username, access_token=None): - self.username = username + def __init__(self, access_token=None): self.baseuri = 'https://api.mapbox.com/uploads/v1' self.session = self.get_session(access_token)
Get user from token As in https://github.com/mapbox/mapbox-sdk-js/blob/master/lib/get_user.js. This allows us to eliminate a duplicate parameter when creating dataset and upload service instances.
mapbox/mapbox-sdk-py
diff --git a/tests/test_base.py b/tests/test_base.py index ff773d7..39a3311 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -1,3 +1,6 @@ +import base64 +import os + import pytest import requests import responses @@ -60,3 +63,36 @@ def test_custom_messages(): with pytest.raises(requests.exceptions.HTTPError) as exc: assert service.handle_http_error(response, raise_for_status=True) assert "401" in exc.value.message + + +class MockService(mapbox.Service): + def __init__(self, access_token=None): + # In order to get a username, a session must be created on init + self.session = self.get_session(access_token) + +def test_username(monkeypatch): + token = 'pk.{0}.test'.format(base64.b64encode(b'{"u":"testuser"}').decode('utf-8')) + service = MockService(access_token=token) + assert service.username == 'testuser' + +def test_username_failures(monkeypatch): + # If your child class doesn't create a session + service = mapbox.Service() + with pytest.raises(AttributeError) as exc: + service.username + assert 'session' in exc.value.message + + if 'MAPBOX_ACCESS_TOKEN' in os.environ: + monkeypatch.delenv('MAPBOX_ACCESS_TOKEN') + service = MockService() + with pytest.raises(ValueError) as exc: + service.username + assert 'access_token' in exc.value.message + assert 'param' in exc.value.message + + token = "not.good" + service = MockService(access_token=token) + with pytest.raises(ValueError) as exc: + service.username + assert 'access_token' in exc.value.message + assert 'username' in exc.value.message diff --git a/tests/test_upload.py b/tests/test_upload.py index 252c540..9d92810 100644 --- a/tests/test_upload.py +++ b/tests/test_upload.py @@ -1,4 +1,5 @@ import json +import base64 import responses @@ -6,6 +7,9 @@ import mapbox username = 'testuser' +access_token = 'pk.{0}.test'.format( + base64.b64encode(b'{"u":"testuser"}').decode('utf-8')) + upload_response_body = """ {{"progress": 0, "modified": "date.test", @@ -30,12 +34,12 @@ def test_get_credentials(): 
responses.add( responses.GET, - 'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token=pk.test'.format(username), + 'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token), match_querystring=True, body=query_body, status=200, content_type='application/json') - res = mapbox.Uploader(username, access_token='pk.test')._get_credentials() + res = mapbox.Uploader(access_token=access_token)._get_credentials() assert res.status_code == 200 creds = res.json() assert username in creds['url'] @@ -48,18 +52,18 @@ def test_get_credentials(): def test_create(): responses.add( responses.POST, - 'https://api.mapbox.com/uploads/v1/{0}?access_token=pk.test'.format(username), + 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token), match_querystring=True, body=upload_response_body, status=201, content_type='application/json') - res = mapbox.Uploader(username, access_token='pk.test').create( + res = mapbox.Uploader(access_token=access_token).create( 'http://example.com/test.json', 'test1') # without username prefix assert res.status_code == 201 job = res.json() assert job['tileset'] == "{0}.test1".format(username) - res2 = mapbox.Uploader(username, access_token='pk.test').create( + res2 = mapbox.Uploader(access_token=access_token).create( 'http://example.com/test.json', 'testuser.test1') # also takes full tileset assert res2.status_code == 201 job = res2.json() @@ -86,11 +90,11 @@ def test_create_name(): responses.add_callback( responses.POST, - 'https://api.mapbox.com/uploads/v1/{0}?access_token=pk.test'.format(username), + 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token), match_querystring=True, callback=request_callback) - res = mapbox.Uploader(username, access_token='pk.test').create( + res = mapbox.Uploader(access_token=access_token).create( 'http://example.com/test.json', 'testuser.test1', name="testname") assert res.status_code == 201 job = res.json() @@ 
-101,12 +105,12 @@ def test_create_name(): def test_list(): responses.add( responses.GET, - 'https://api.mapbox.com/uploads/v1/{0}?access_token=pk.test'.format(username), + 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token), match_querystring=True, body="[{0}]".format(upload_response_body), status=200, content_type='application/json') - res = mapbox.Uploader(username, access_token='pk.test').list() + res = mapbox.Uploader(access_token=access_token).list() assert res.status_code == 200 uploads = res.json() assert len(uploads) == 1 @@ -118,12 +122,12 @@ def test_status(): job = json.loads(upload_response_body) responses.add( responses.GET, - 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token=pk.test'.format(username, job['id']), + 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token), match_querystring=True, body=upload_response_body, status=200, content_type='application/json') - res = mapbox.Uploader(username, access_token='pk.test').status(job) + res = mapbox.Uploader(access_token=access_token).status(job) assert res.status_code == 200 status = res.json() assert job == status @@ -134,10 +138,10 @@ def test_delete(): job = json.loads(upload_response_body) responses.add( responses.DELETE, - 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token=pk.test'.format(username, job['id']), + 'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token), match_querystring=True, body=None, status=204, content_type='application/json') - res = mapbox.Uploader(username, access_token='pk.test').delete(job) + res = mapbox.Uploader(access_token=access_token).delete(job) assert res.status_code == 204
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.4", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work boto3==1.23.10 botocore==1.26.10 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 click-plugins==1.1.1 cligj==0.7.2 coverage==6.2 coveralls==3.3.1 distlib==0.3.9 docopt==0.6.2 filelock==3.4.1 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==0.10.0 -e git+https://github.com/mapbox/mapbox-sdk-py.git@06728ffc30fba83003e9c76645ecec3eec1c63de#egg=mapbox more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work platformdirs==2.4.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 requests==2.27.1 responses==0.17.0 s3transfer==0.5.2 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 tox==3.28.0 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work uritemplate==4.1.1 uritemplate.py==3.0.2 urllib3==1.26.20 virtualenv==20.17.1 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: mapbox-sdk-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.23.10 - botocore==1.26.10 - charset-normalizer==2.0.12 - click==8.0.4 - click-plugins==1.1.1 - cligj==0.7.2 - coverage==6.2 - coveralls==3.3.1 - distlib==0.3.9 - docopt==0.6.2 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - jmespath==0.10.0 - platformdirs==2.4.0 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - requests==2.27.1 - responses==0.17.0 - s3transfer==0.5.2 - six==1.17.0 - tomli==1.2.3 - tox==3.28.0 - uritemplate==4.1.1 - uritemplate-py==3.0.2 - urllib3==1.26.20 - virtualenv==20.17.1 prefix: /opt/conda/envs/mapbox-sdk-py
[ "tests/test_base.py::test_username", "tests/test_base.py::test_username_failures", "tests/test_upload.py::test_get_credentials", "tests/test_upload.py::test_create", "tests/test_upload.py::test_create_name", "tests/test_upload.py::test_list", "tests/test_upload.py::test_status", "tests/test_upload.py::test_delete" ]
[]
[ "tests/test_base.py::test_service_session", "tests/test_base.py::test_service_session_env", "tests/test_base.py::test_service_session_os_environ", "tests/test_base.py::test_service_session_os_environ_caps", "tests/test_base.py::test_product_token", "tests/test_base.py::test_user_agent", "tests/test_base.py::test_custom_messages" ]
[]
MIT License
322
andycasey__ads-36
5bd62ef2bf924116374455e222ea9ac8dc416b3a
2015-12-04 17:01:02
c039d67c2b2e9dad936758bc89df1fdd1cbd0aa1
diff --git a/ads/search.py b/ads/search.py index eb64e70..fed6c0c 100644 --- a/ads/search.py +++ b/ads/search.py @@ -275,7 +275,7 @@ class SearchQuery(BaseQuery): "title", "reference", "citation"] def __init__(self, query_dict=None, q=None, fq=None, fl=DEFAULT_FIELDS, - sort=None, start=0, rows=50, max_pages=3, **kwargs): + sort=None, start=0, rows=50, max_pages=1, **kwargs): """ constructor :param query_dict: raw query that will be sent unmodified. raw takes @@ -288,7 +288,7 @@ class SearchQuery(BaseQuery): :param start: solr "start" param (start) :param rows: solr "rows" param (rows) :param max_pages: Maximum number of pages to return. This value may - be modified after instansiation to increase the number of results + be modified after instantiation to increase the number of results :param kwargs: kwargs to add to `q` as "key:value" """ self._articles = [] @@ -385,7 +385,7 @@ class SearchQuery(BaseQuery): # if we have hit the max_pages limit, then iteration is done. page = math.ceil(len(self.articles)/self.query['rows']) - if page > self.max_pages: + if page >= self.max_pages: raise StopIteration("Maximum number of pages queried") # We aren't on the max_page of results nor do we have all
Number of returned results doesn't correspond to 'rows' key value in SearchQuery Example code: ```` In [5]: papers = ads.SearchQuery(q="supernova", sort="citation_count", rows=10) In [6]: print(len(list(papers))) 40 ```` Not massively important, but a bit surprising anyway. Any explanation? Thanks!
andycasey/ads
diff --git a/ads/tests/test_search.py b/ads/tests/test_search.py index 346d957..331a9f5 100644 --- a/ads/tests/test_search.py +++ b/ads/tests/test_search.py @@ -131,18 +131,24 @@ class TestSearchQuery(unittest.TestCase): self.assertEqual(len(sq.articles), 1) self.assertEqual(sq._query['start'], 1) self.assertEqual(next(sq).bibcode, '2012GCN..13229...1S') - self.assertEqual(len(list(sq)), 19) # 2 already returned + self.assertEqual(len(list(sq)), 18) # 2 already returned with self.assertRaisesRegexp( StopIteration, "Maximum number of pages queried"): next(sq) sq.max_pages = 500 - self.assertEqual(len(list(sq)), 28-19-2) + self.assertEqual(len(list(sq)), 28-18-2) with self.assertRaisesRegexp( StopIteration, "All records found"): next(sq) + # not setting max_pages should return the exact number of rows requests + sq = SearchQuery(q="unittest", rows=3) + with MockSolrResponse(sq.HTTP_ENDPOINT): + self.assertEqual(len(list(sq)), 3) + + def test_init(self): """ init should result in a properly formatted query attribute
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/andycasey/ads.git@5bd62ef2bf924116374455e222ea9ac8dc416b3a#egg=ads certifi==2025.1.31 charset-normalizer==3.4.1 exceptiongroup==1.2.2 httpretty==1.1.4 idna==3.10 iniconfig==2.1.0 MarkupSafe==3.0.2 mock==5.2.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 requests==2.32.3 six==1.17.0 tomli==2.2.1 urllib3==2.3.0 Werkzeug==3.1.3
name: ads channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - exceptiongroup==1.2.2 - httpretty==1.1.4 - idna==3.10 - iniconfig==2.1.0 - markupsafe==3.0.2 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - requests==2.32.3 - six==1.17.0 - tomli==2.2.1 - urllib3==2.3.0 - werkzeug==3.1.3 prefix: /opt/conda/envs/ads
[ "ads/tests/test_search.py::TestSearchQuery::test_iter" ]
[]
[ "ads/tests/test_search.py::TestArticle::test_cached_properties", "ads/tests/test_search.py::TestArticle::test_equals", "ads/tests/test_search.py::TestArticle::test_get_field", "ads/tests/test_search.py::TestArticle::test_init", "ads/tests/test_search.py::TestArticle::test_print_methods", "ads/tests/test_search.py::TestSearchQuery::test_init", "ads/tests/test_search.py::TestSolrResponse::test_articles", "ads/tests/test_search.py::TestSolrResponse::test_init", "ads/tests/test_search.py::TestSolrResponse::test_load_http_response", "ads/tests/test_search.py::Testquery::test_init" ]
[]
MIT License
323
mozilla-services__requests-hawk-14
5cbc81acf71584413ee5a2a65bf8ac951f0f9c50
2015-12-05 19:17:42
5cbc81acf71584413ee5a2a65bf8ac951f0f9c50
diff --git a/CHANGES.txt b/CHANGES.txt index 0cce5ce..f312487 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,10 +1,14 @@ CHANGELOG ========= -0.3.0 (unreleased) +1.0.0 (unreleased) ------------------ -- Nothing changed yet. +- Simplified API for using HawkAuth when the id and key are known. (#8) +- Added support for overriding the default algorithm (sha256) when deriving + credentials from the hawk session token, via a new ``algorithm`` parameter. + +See the README for migration advice if you use the ``credentials`` parameter. 0.2.1 (2015-10-14) diff --git a/README.rst b/README.rst index 74ce1c9..8bb3930 100644 --- a/README.rst +++ b/README.rst @@ -11,16 +11,30 @@ This project allows you to use `the python requests library Hawk itself does not provide any mechanism for obtaining or transmitting the set of shared credentials required, but this project proposes a scheme we use -accross mozilla services projects. +across mozilla services projects. Great, how can I use it? ======================== First, you'll need to install it:: + .. code-block:: bash + pip install requests-hawk -Then, in your project, you can use it like that:: +Then, in your project, if you know the `id` and `key`, you can use:: + + .. code-block:: python + + import requests + from requests_hawk import HawkAuth + + hawk_auth = HawkAuth(id='my-hawk-id', key='my-hawk-secret-key') + requests.post("https://example.com/url", auth=hawk_auth) + +Or if you need to derive them from the hawk session token, instead use:: + + .. code-block:: python import requests from requests_hawk import HawkAuth @@ -31,6 +45,15 @@ Then, in your project, you can use it like that:: ) requests.post("/url", auth=hawk_auth) +In the second example, the ``server_url`` parameter to ``HawkAuth`` was used to +provide a default host name, to avoid having to repeat it for each request. + +If you wish to override the default algorithm of ``sha256``, pass the desired +algorithm name using the optional ``algorithm`` parameter. 
+ +Note: The ``credentials`` parameter has been removed. Instead pass ``id`` and +``key`` separately (as above), or pass the existing dict as ``**credentials``. + Integration with httpie ======================= @@ -40,11 +63,15 @@ uses the requests library. We've made it simple for you to plug hawk with it. If you know the id and key, use it like that:: + .. code-block:: bash + http POST localhost:5000/registration\ --auth-type=hawk --auth='id:key' Or, if you want to use the hawk session token, you can do as follows:: + .. code-block:: bash + http POST localhost:5000/registration\ --auth-type=hawk --auth='c0d8cd2ec579a3599bef60f060412f01f5dc46f90465f42b5c47467481315f51:' @@ -59,15 +86,19 @@ Okay, on to the actual details. The server gives you a session token, that you'll need to derive to get the hawk credentials. -Do an HKDF derivation on the given session token. You’ll need to use the +Do an HKDF derivation on the given session token. You'll need to use the following parameters:: + .. code-block:: python + key_material = HKDF(hawk_session, '', 'identity.mozilla.com/picl/v1/sessionToken', 32*2) -The key material you’ll get out of the HKDF needs to be separated into two +The key material you'll get out of the HKDF needs to be separated into two parts, the first 32 hex characters are the ``hawk id``, and the next 32 ones are the ``hawk key``:: + .. code-block:: python + credentials = { 'id': keyMaterial[0:32] 'key': keyMaterial[32:64] @@ -79,4 +110,6 @@ Run tests To run test, you can use tox:: + .. code-block:: bash + tox diff --git a/requests_hawk/__init__.py b/requests_hawk/__init__.py index 0827744..6bd4f10 100644 --- a/requests_hawk/__init__.py +++ b/requests_hawk/__init__.py @@ -19,46 +19,52 @@ class HawkAuth(AuthBase): You don't need to set this parameter if you already know the hawk credentials (Optional). - :param credentials: - Python dict containing credentials information, with keys for "id", - "key" and "algorithm" (Optional). 
+ :param id: + The hawk id string to use for authentication (Optional). + + :param key: + A string containing the hawk secret key (Optional). + + :param algorithm: + A string containing the name of the algorithm to be used. + (Optional, defaults to 'sha256'). :param server_url: The url of the server, this is useful for hawk when signing the requests. - In case this is omited, fallbacks to the value of the "Host" header of + In case this is omitted, fallbacks to the value of the "Host" header of the request (Optional). - - Note that the `hawk_session` and `credentials` parameters are mutually - exclusive. You should set one or the other. - + Note that the `hawk_session` and `id` parameters are mutually exclusive. + You should use either `hawk_session` or both `id` and 'key'. """ - def __init__(self, hawk_session=None, credentials=None, server_url=None, - _timestamp=None): - if ((credentials, hawk_session) == (None, None) - or (credentials is not None and hawk_session is not None)): + def __init__(self, hawk_session=None, id=None, key=None, algorithm='sha256', + credentials=None, server_url=None, _timestamp=None): + if credentials is not None: + raise AttributeError("The 'credentials' param has been removed. 
" + "Pass 'id' and 'key' instead, or '**credentials_dict'.") + + if (hawk_session and (id or key) + or not hawk_session and not (id and key)): raise AttributeError("You should pass either 'hawk_session' " - "or 'credentials'.") + "or both 'id' and 'key'.") - elif hawk_session is not None: + if hawk_session: try: hawk_session = codecs.decode(hawk_session, 'hex_codec') except binascii.Error as e: raise TypeError(e) keyInfo = 'identity.mozilla.com/picl/v1/sessionToken' keyMaterial = HKDF(hawk_session, "", keyInfo, 32*2) - credentials = { - 'id': codecs.encode(keyMaterial[:32], "hex_codec"), - 'key': codecs.encode(keyMaterial[32:64], "hex_codec"), - 'algorithm': 'sha256' - } - self.credentials = credentials + id = codecs.encode(keyMaterial[:32], "hex_codec") + key = codecs.encode(keyMaterial[32:64], "hex_codec") + + self.credentials = { + 'id': id, + 'key': key, + 'algorithm': algorithm + } self._timestamp = _timestamp - - if server_url is not None: - self.host = urlparse(server_url).netloc - else: - self.host = None + self.host = urlparse(server_url).netloc if server_url else None def __call__(self, r): if self.host is not None: @@ -119,17 +125,10 @@ try: auth_type = 'hawk' description = '' - def get_auth(self, id, key): - kwargs = {} - if key == '': - kwargs['hawk_session'] = id - else: - kwargs['credentials'] = { - 'id': id, - 'key': key, - 'algorithm': 'sha256' - } - return HawkAuth(**kwargs) + def get_auth(self, username, password): + if password == '': + return HawkAuth(hawk_session=username) + return HawkAuth(id=username, key=password) except ImportError: pass diff --git a/setup.cfg b/setup.cfg index d08cbd5..a94ac3b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,3 +3,7 @@ create-wheel = yes [bdist_wheel] universal = 1 + +[flake8] +# Override the default of 80 characters +max-line-length = 90 diff --git a/setup.py b/setup.py index 7cd658a..4831ee1 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ with codecs.open(os.path.join(here, 'CHANGES.txt'), 
encoding='utf-8') as f: requires = ['requests!=2.8.0', 'mohawk'] setup(name='requests-hawk', - version='0.3.0.dev0', + version='1.0.0.dev0', description='requests-hawk', long_description=README + '\n\n' + CHANGES, classifiers=[
Make instantiation with credentials simpler/cleaner We've just started to use Hawk for the Treeherder API and so are having to add support to our Python client that we make available for people to interact with the API, as well as update documentation for it. The current WIP docs for how people should use our Python client, has examples like: ```python from requests_hawk import HawkAuth from thclient import TreeherderClient ... auth = HawkAuth(credentials={ 'id': 'your-treeherder-client-id', 'key': 'your-secret', 'algorithm': 'sha256' }) client = TreeherderClient(protocol='https', host='treeherder.mozilla.org', auth=auth) client.post_collection('mozilla-central', tjc) ``` This is more verbose than I'd like (eg: users of our client shouldn't have to know we're using sha256) and passing the credentials as a dict doesn't seem very clean, so I was contemplating wrapping HawkAuth like so in our client code: ```python class TreeherderHawkAuth(HawkAuth): """Thin wrapper around HawkAuth to reduce boilerplate.""" def __init__(self, client_id, secret): credentials = { 'id': client_id, 'key': secret, 'algorithm': 'sha256' } return super(TreeherderHawkAuth, self).__init__(credentials=credentials) ``` So instead the example would become: ```python from thclient import TreeherderClient, TreeherderHawkAuth ... auth = TreeherderHawkAuth('your-treeherder-client-id', 'your-secret') client = TreeherderClient(protocol='https', host='treeherder.mozilla.org', auth=auth) client.post_collection('mozilla-central', tjc) ``` However we then have a wrapper around a wrapper whose purpose was to make using Hawk with requests simpler, which seems to defeat the point a bit. Would you be open to making changes to make this unnecessary? (eg defaulting to 'sha256' if no algorithm specified, being able to pass the credentials in a nicer form than a dict etc. eg a `HawkAuth(client_id='foo', secret='bar')` or `HawkAuth.from_credentials('foo', 'bar')`). Thanks :-)
mozilla-services/requests-hawk
diff --git a/requests_hawk/tests/test_hawkauth.py b/requests_hawk/tests/test_hawkauth.py index d91363c..ed0aa21 100644 --- a/requests_hawk/tests/test_hawkauth.py +++ b/requests_hawk/tests/test_hawkauth.py @@ -6,20 +6,32 @@ from requests_hawk import HawkAuth class TestHawkAuth(unittest.TestCase): - def test_hawkauth_errors_when_credentials_and_hawk_session_passed(self): + def test_hawkauth_errors_when_id_and_key_and_hawk_session_passed(self): self.assertRaises(AttributeError, HawkAuth, - credentials={}, hawk_session="test") + id='test', key='test', hawk_session="test") + + def test_hawkauth_errors_when_id_and_hawk_session_passed(self): + self.assertRaises(AttributeError, HawkAuth, + id='test', hawk_session="test") + + def test_hawkauth_errors_when_key_and_hawk_session_passed(self): + self.assertRaises(AttributeError, HawkAuth, + key='test', hawk_session="test") + + def test_hawkauth_errors_when_only_id_passed(self): + self.assertRaises(AttributeError, HawkAuth, id='test') + + def test_hawkauth_errors_when_only_key_passed(self): + self.assertRaises(AttributeError, HawkAuth, key='test') + + def test_hawkauth_errors_when_credentials_passed(self): + self.assertRaises(AttributeError, HawkAuth, credentials={}) def test_hawkauth_errors_when_no_auth_is_set(self): self.assertRaises(AttributeError, HawkAuth) - def test_hawk_auth_supports_credentials_as_dict(self): - credentials = { - 'id': 'test_id', - 'key': 'test_key', - 'algorithm': 'sha256' - } - auth = HawkAuth(credentials=credentials, _timestamp=1431698426) + def test_hawk_auth_supports_credentials_as_parameters(self): + auth = HawkAuth(id='test_id', key='test_key', _timestamp=1431698426) request = Request('PUT', 'http://www.example.com', json={"foo": "bar"}, auth=auth) r = request.prepare() @@ -32,6 +44,10 @@ class TestHawkAuth(unittest.TestCase): "Timestamp doesn't match") self.assertEqual(r.body, '{"foo": "bar"}') + def test_overriding_credentials_algorithm(self): + auth = HawkAuth(id='test_id', key='test_key', 
algorithm='sha1') + self.assertEqual(auth.credentials['algorithm'], 'sha1') + def test_key_non_hex_values_throws(self): self.assertRaises(TypeError, HawkAuth, hawk_session="test") @@ -45,6 +61,11 @@ class TestHawkAuth(unittest.TestCase): 'algorithm': 'sha256' }) + def test_overriding_session_algorithm(self): + auth = HawkAuth(hawk_session=codecs.encode(b"hello", "hex_codec"), + algorithm='sha1') + self.assertEqual(auth.credentials['algorithm'], 'sha1') + def test_server_url_is_parsed(self): auth = HawkAuth(hawk_session=codecs.encode(b"hello", "hex_codec"), server_url="http://localhost:5000")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 5 }
0.2
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mohawk==1.1.0 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work requests==2.32.3 -e git+https://github.com/mozilla-services/requests-hawk.git@5cbc81acf71584413ee5a2a65bf8ac951f0f9c50#egg=requests_hawk six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work urllib3==2.3.0
name: requests-hawk channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - idna==3.10 - mohawk==1.1.0 - requests==2.32.3 - six==1.17.0 - urllib3==2.3.0 prefix: /opt/conda/envs/requests-hawk
[ "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_credentials_passed", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_id_and_hawk_session_passed", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_id_and_key_and_hawk_session_passed", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_key_and_hawk_session_passed", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_only_id_passed", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_only_key_passed", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_overriding_credentials_algorithm", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_overriding_session_algorithm" ]
[ "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawk_auth_is_called_when_json_present", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawk_auth_supports_credentials_as_parameters" ]
[ "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_credentials_are_derived_from_session", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawk_auth_can_handle_a_timestamp_argument", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_hawkauth_errors_when_no_auth_is_set", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_key_non_hex_values_throws", "requests_hawk/tests/test_hawkauth.py::TestHawkAuth::test_server_url_is_parsed" ]
[]
Apache License 2.0
324
sympy__sympy-10206
5b23c89663d3b5267124f4c1fa3d1763719bf54f
2015-12-07 03:17:56
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/core/mul.py b/sympy/core/mul.py index 30a51adb8e..2a0e6090e4 100644 --- a/sympy/core/mul.py +++ b/sympy/core/mul.py @@ -368,17 +368,17 @@ def flatten(cls, seq): # gather exponents of common bases... def _gather(c_powers): + new_c_powers = [] common_b = {} # b:e for b, e in c_powers: co = e.as_coeff_Mul() - common_b.setdefault(b, {}).setdefault( - co[1], []).append(co[0]) + common_b.setdefault(b, {}).setdefault(co[1], []).append(co[0]) for b, d in common_b.items(): for di, li in d.items(): d[di] = Add(*li) - new_c_powers = [] for b, e in common_b.items(): - new_c_powers.extend([(b, c*t) for t, c in e.items()]) + for t, c in e.items(): + new_c_powers.append((b, c*t)) return new_c_powers # in c_powers @@ -402,45 +402,14 @@ def _gather(c_powers): # 0 1 # x -> 1 x -> x - - # this should only need to run twice; if it fails because - # it needs to be run more times, perhaps this should be - # changed to a "while True" loop -- the only reason it - # isn't such now is to allow a less-than-perfect result to - # be obtained rather than raising an error or entering an - # infinite loop - for i in range(2): - new_c_powers = [] - changed = False - for b, e in c_powers: - if e.is_zero: - continue - if e is S.One: - if b.is_Number: - coeff *= b - continue - p = b - if e is not S.One: - p = Pow(b, e) - # check to make sure that the base doesn't change - # after exponentiation; to allow for unevaluated - # Pow, we only do so if b is not already a Pow - if p.is_Pow and not b.is_Pow: - bi = b - b, e = p.as_base_exp() - if b != bi: - changed = True - c_part.append(p) - new_c_powers.append((b, e)) - # there might have been a change, but unless the base - # matches some other base, there is nothing to do - if changed and len(set( - b for b, e in new_c_powers)) != len(new_c_powers): - # start over again - c_part = [] - c_powers = _gather(new_c_powers) - else: - break + for b, e in c_powers: + if e is S.One: + if b.is_Number: + coeff *= b + else: + c_part.append(b) + 
elif e is not S.Zero: + c_part.append(Pow(b, e)) # x x x # 2 * 3 -> 6 diff --git a/sympy/polys/polyutils.py b/sympy/polys/polyutils.py index 28f9876313..0e74704deb 100644 --- a/sympy/polys/polyutils.py +++ b/sympy/polys/polyutils.py @@ -262,7 +262,7 @@ def _is_coeff(factor): else: base, exp = decompose_power_rat(factor) - elements[base] = elements.setdefault(base, 0) + exp + elements[base] = exp gens.add(base) terms.append((coeff, elements)) diff --git a/sympy/solvers/inequalities.py b/sympy/solvers/inequalities.py index 4a9176d95e..e5c5563e3a 100644 --- a/sympy/solvers/inequalities.py +++ b/sympy/solvers/inequalities.py @@ -2,7 +2,7 @@ from __future__ import print_function, division -from sympy.core import Symbol, Dummy +from sympy.core import Symbol, Dummy, sympify from sympy.core.compatibility import iterable, reduce from sympy.sets import Interval from sympy.core.relational import Relational, Eq, Ge, Lt @@ -481,8 +481,6 @@ def valid(x): def _solve_inequality(ie, s): """ A hacky replacement for solve, since the latter only works for univariate inequalities. 
""" - if not ie.rel_op in ('>', '>=', '<', '<='): - raise NotImplementedError expr = ie.lhs - ie.rhs try: p = Poly(expr, s) @@ -490,15 +488,14 @@ def _solve_inequality(ie, s): raise NotImplementedError except (PolynomialError, NotImplementedError): try: - n, d = expr.as_numer_denom() return reduce_rational_inequalities([[ie]], s) except PolynomialError: return solve_univariate_inequality(ie, s) a, b = p.all_coeffs() - if a.is_positive: + if a.is_positive or ie.rel_op in ('!=', '=='): return ie.func(s, -b/a) elif a.is_negative: - return ie.func(-b/a, s) + return ie.reversed.func(s, -b/a) else: raise NotImplementedError @@ -572,6 +569,22 @@ def reduce_inequalities(inequalities, symbols=[]): """ if not iterable(inequalities): inequalities = [inequalities] + inequalities = [sympify(i) for i in inequalities] + + gens = set().union(*[i.free_symbols for i in inequalities]) + + if not iterable(symbols): + symbols = [symbols] + symbols = (set(symbols) or gens) & gens + if any(i.is_real is False for i in symbols): + raise TypeError(filldedent(''' + inequalities cannot contain symbols that are not real.''')) + + # make vanilla symbol real + recast = dict([(i, Dummy(i.name, real=True)) + for i in gens if i.is_real is None]) + inequalities = [i.xreplace(recast) for i in inequalities] + symbols = set([i.xreplace(recast) for i in symbols]) # prefilter keep = [] @@ -591,18 +604,6 @@ def reduce_inequalities(inequalities, symbols=[]): inequalities = keep del keep - gens = reduce(set.union, [i.free_symbols for i in inequalities], set()) - - if not iterable(symbols): - symbols = [symbols] - symbols = set(symbols) or gens - - # make vanilla symbol real - recast = dict([(i, Dummy(i.name, real=True)) - for i in gens if i.is_real is None]) - inequalities = [i.xreplace(recast) for i in inequalities] - symbols = set([i.xreplace(recast) for i in symbols]) - # solve system rv = _reduce_inequalities(inequalities, symbols) diff --git a/sympy/solvers/solveset.py b/sympy/solvers/solveset.py index 
c60073f977..132c550e37 100644 --- a/sympy/solvers/solveset.py +++ b/sympy/solvers/solveset.py @@ -12,6 +12,7 @@ from sympy.core.numbers import I, Number, Rational, oo from sympy.core.function import (Lambda, expand, expand_complex) from sympy.core.relational import Eq +from sympy.core.symbol import Symbol from sympy.simplify.simplify import fraction, trigsimp from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp, acos, asin, atan, acsc, asec, arg, @@ -183,6 +184,7 @@ def _invert_real(f, g_ys, symbol): tan_cot_invs = Union(*[imageset(Lambda(n, n*pi + f.inverse()(g_y)), \ S.Integers) for g_y in g_ys]) return _invert_real(f.args[0], tan_cot_invs, symbol) + return (f, g_ys) @@ -446,12 +448,13 @@ def solveset_real(f, symbol): (-oo, oo) """ - if not symbol.is_Symbol: - raise ValueError(" %s is not a symbol" % (symbol)) + if not getattr(symbol, 'is_Symbol', False): + raise ValueError('A Symbol must be given, not type %s: %s' % + (type(symbol), symbol)) f = sympify(f) if not isinstance(f, (Expr, Number)): - raise ValueError(" %s is not a valid sympy expression" % (f)) + raise ValueError("%s is not a valid SymPy expression" % (f)) original_eq = f f = together(f) @@ -462,8 +465,8 @@ def solveset_real(f, symbol): if not symbol in fraction(f)[1].free_symbols and f.is_rational_function(): f = expand(f) - if f.has(Piecewise): - f = piecewise_fold(f) + f = piecewise_fold(f) + result = EmptySet() if f.expand().is_zero: @@ -770,8 +773,9 @@ def solveset_complex(f, symbol): ImageSet(Lambda(_n, 2*_n*I*pi), Integers()) """ - if not symbol.is_Symbol: - raise ValueError(" %s is not a symbol" % (symbol)) + if not getattr(symbol, 'is_Symbol', False): + raise ValueError('A Symbol must be given, not type %s: %s' % + (type(symbol), symbol)) f = sympify(f) original_eq = f @@ -840,7 +844,7 @@ def solveset(f, symbol=None, domain=S.Complexes): Set A set of values for `symbol` for which `f` is True or is equal to - zero. An `EmptySet` is returned if no solution is found. + zero. 
An `EmptySet` is returned if `f` is False or nonzero. A `ConditionSet` is returned as unsolved object if algorithms to evaluatee complete solution are not yet implemented. @@ -864,6 +868,15 @@ def solveset(f, symbol=None, domain=S.Complexes): variable being solved for and instead, uses the `domain` parameter to decide which solver to use. + Notes + ===== + + Python interprets 0 and 1 as False and True, respectively, but + in this function they refer to solutions of an expression. So 0 and 1 + return the Domain and EmptySet, respectively, while True and False + return the opposite (as they are assumed to be solutions of relational + expressions). + See Also ======== @@ -901,26 +914,36 @@ def solveset(f, symbol=None, domain=S.Complexes): """ - from sympy.solvers.inequalities import solve_univariate_inequality + f = sympify(f) + + if f is S.true: + return domain + + if f is S.false: + return S.EmptySet + + free_symbols = f.free_symbols + + if not free_symbols: + b = Eq(f, 0) + if b is S.true: + return domain + elif b is S.false: + return S.EmptySet + else: + raise NotImplementedError(filldedent(''' + relationship between value and 0 is unknown: %s''' % b)) if symbol is None: - free_symbols = f.free_symbols if len(free_symbols) == 1: symbol = free_symbols.pop() else: raise ValueError(filldedent(''' The independent variable must be specified for a multivariate equation.''')) - elif not symbol.is_Symbol: - raise ValueError('A Symbol must be given, not type %s: %s' % (type(symbol), symbol)) - - f = sympify(f) - - if f is S.false: - return EmptySet() - - if f is S.true: - return domain + elif not getattr(symbol, 'is_Symbol', False): + raise ValueError('A Symbol must be given, not type %s: %s' % + (type(symbol), symbol)) if isinstance(f, Eq): from sympy.core import Add
reduce_inequalities error ```python >>> reduce_inequalities(x**2>=0) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "sympy\solvers\inequalities.py", line 605, in reduce_inequalities rv = _reduce_inequalities(inequalities, symbols) File "sympy\solvers\inequalities.py", line 512, in _reduce_inequalities expr, rel = inequality.lhs, inequality.rel_op # rhs is 0 AttributeError: 'BooleanTrue' object has no attribute 'lhs' ```
sympy/sympy
diff --git a/sympy/core/tests/test_expr.py b/sympy/core/tests/test_expr.py index 5313766db9..49b3d5e614 100644 --- a/sympy/core/tests/test_expr.py +++ b/sympy/core/tests/test_expr.py @@ -1686,13 +1686,7 @@ def test_issue_6325(): e.diff(t, 2) == ans assert diff(e, t, 2, simplify=False) != ans - def test_issue_7426(): f1 = a % c f2 = x % z assert f1.equals(f2) == False - - -def test_issue_10161(): - x = symbols('x', real=True) - assert x*abs(x)*abs(x) == x**3 diff --git a/sympy/polys/tests/test_polyutils.py b/sympy/polys/tests/test_polyutils.py index 7b27d42f14..2ea39b7b7a 100644 --- a/sympy/polys/tests/test_polyutils.py +++ b/sympy/polys/tests/test_polyutils.py @@ -1,7 +1,6 @@ """Tests for useful utilities for higher level polynomial classes. """ -from sympy import (S, Integer, sin, cos, sqrt, symbols, pi, - Eq, Integral, exp, Mul) +from sympy import S, Integer, sin, cos, sqrt, symbols, pi, Eq, Integral, exp from sympy.utilities.pytest import raises from sympy.polys.polyutils import ( @@ -270,8 +269,6 @@ def test__parallel_dict_from_expr_no_gens(): assert parallel_dict_from_expr([x*y, 2*z, Integer(3)]) == \ ([{(1, 1, 0): Integer( 1)}, {(0, 0, 1): Integer(2)}, {(0, 0, 0): Integer(3)}], (x, y, z)) - assert parallel_dict_from_expr((Mul(x, x**2, evaluate=False),)) == \ - ([{(3,): 1}], (x,)) def test_parallel_dict_from_expr(): diff --git a/sympy/solvers/tests/test_inequalities.py b/sympy/solvers/tests/test_inequalities.py index ef9ca711ab..2c595b787d 100644 --- a/sympy/solvers/tests/test_inequalities.py +++ b/sympy/solvers/tests/test_inequalities.py @@ -185,6 +185,7 @@ def test_reduce_abs_inequalities(): nr = Symbol('nr', real=False) raises(TypeError, lambda: reduce_inequalities(abs(nr - 5) < 3)) + assert reduce_inequalities(x < 3, symbols=[x, nr]) == And(-oo < x, x < 3) def test_reduce_inequalities_general(): @@ -196,6 +197,7 @@ def test_reduce_inequalities_boolean(): assert reduce_inequalities( [Eq(x**2, 0), True]) == Eq(x, 0) assert reduce_inequalities([Eq(x**2, 0), 
False]) == False + assert reduce_inequalities(x**2 >= 0) is S.true # issue 10196 def test_reduce_inequalities_multivariate(): @@ -212,6 +214,8 @@ def test_reduce_inequalities_errors(): def test_hacky_inequalities(): assert reduce_inequalities(x + y < 1, symbols=[x]) == (x < 1 - y) assert reduce_inequalities(x + y >= 1, symbols=[x]) == (x >= 1 - y) + assert reduce_inequalities(Eq(0, x - y), symbols=[x]) == Eq(x, y) + assert reduce_inequalities(Ne(0, x - y), symbols=[x]) == Ne(x, y) def test_issue_6343(): @@ -242,11 +246,11 @@ def test_issue_8235(): def test_issue_5526(): assert reduce_inequalities(S(0) <= x + Integral(y**2, (y, 1, 3)) - 1, [x]) == \ - (-Integral(y**2, (y, 1, 3)) + 1 <= x) + (x >= -Integral(y**2, (y, 1, 3)) + 1) f = Function('f') e = Sum(f(x), (x, 1, 3)) assert reduce_inequalities(S(0) <= x + e + y**2, [x]) == \ - (-y**2 - Sum(f(x), (x, 1, 3)) <= x) + (x >= -y**2 - Sum(f(x), (x, 1, 3))) def test_solve_univariate_inequality(): diff --git a/sympy/solvers/tests/test_solveset.py b/sympy/solvers/tests/test_solveset.py index 8f96ab440a..ba77f552fc 100644 --- a/sympy/solvers/tests/test_solveset.py +++ b/sympy/solvers/tests/test_solveset.py @@ -225,6 +225,7 @@ def test_is_function_class_equation(): def test_garbage_input(): + raises(ValueError, lambda: solveset_real(x, 1)) raises(ValueError, lambda: solveset_real([x], x)) raises(ValueError, lambda: solveset_real(x, pi)) raises(ValueError, lambda: solveset_real(x, x**2)) @@ -743,6 +744,7 @@ def test_solve_complex_unsolvable(): solution = solveset_complex(cos(x) - S.Half, x) assert solution == unsolved_object + @XFAIL def test_solve_trig_simplified(): from sympy.abc import n @@ -852,6 +854,12 @@ def test_solve_lambert(): def test_solveset(): x = Symbol('x') raises(ValueError, lambda: solveset(x + y)) + raises(ValueError, lambda: solveset(x, 1)) + + assert solveset(0, domain=S.Reals) == S.Reals + assert solveset(1) == S.EmptySet + assert solveset(True, domain=S.Reals) == S.Reals # issue 10197 + assert 
solveset(False, domain=S.Reals) == S.EmptySet assert solveset(exp(x) - 1, domain=S.Reals) == FiniteSet(0) assert solveset(exp(x) - 1, x, S.Reals) == FiniteSet(0)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 4 }
0.7
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": [], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 mpmath==1.3.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 -e git+https://github.com/sympy/sympy.git@5b23c89663d3b5267124f4c1fa3d1763719bf54f#egg=sympy tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mpmath==1.3.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/sympy
[ "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_boolean", "sympy/solvers/tests/test_inequalities.py::test_hacky_inequalities", "sympy/solvers/tests/test_inequalities.py::test_issue_5526", "sympy/solvers/tests/test_solveset.py::test_garbage_input", "sympy/solvers/tests/test_solveset.py::test_solveset" ]
[]
[ "sympy/core/tests/test_expr.py::test_basic", "sympy/core/tests/test_expr.py::test_ibasic", "sympy/core/tests/test_expr.py::test_relational", "sympy/core/tests/test_expr.py::test_relational_assumptions", "sympy/core/tests/test_expr.py::test_relational_noncommutative", "sympy/core/tests/test_expr.py::test_basic_nostr", "sympy/core/tests/test_expr.py::test_series_expansion_for_uniform_order", "sympy/core/tests/test_expr.py::test_leadterm", "sympy/core/tests/test_expr.py::test_as_leading_term", "sympy/core/tests/test_expr.py::test_leadterm2", "sympy/core/tests/test_expr.py::test_leadterm3", "sympy/core/tests/test_expr.py::test_as_leading_term2", "sympy/core/tests/test_expr.py::test_as_leading_term3", "sympy/core/tests/test_expr.py::test_as_leading_term4", "sympy/core/tests/test_expr.py::test_as_leading_term_stub", "sympy/core/tests/test_expr.py::test_atoms", "sympy/core/tests/test_expr.py::test_is_polynomial", "sympy/core/tests/test_expr.py::test_is_rational_function", "sympy/core/tests/test_expr.py::test_is_algebraic_expr", "sympy/core/tests/test_expr.py::test_SAGE1", "sympy/core/tests/test_expr.py::test_SAGE2", "sympy/core/tests/test_expr.py::test_SAGE3", "sympy/core/tests/test_expr.py::test_len", "sympy/core/tests/test_expr.py::test_doit", "sympy/core/tests/test_expr.py::test_attribute_error", "sympy/core/tests/test_expr.py::test_args", "sympy/core/tests/test_expr.py::test_noncommutative_expand_issue_3757", "sympy/core/tests/test_expr.py::test_as_numer_denom", "sympy/core/tests/test_expr.py::test_as_independent", "sympy/core/tests/test_expr.py::test_replace", "sympy/core/tests/test_expr.py::test_find", "sympy/core/tests/test_expr.py::test_count", "sympy/core/tests/test_expr.py::test_has_basics", "sympy/core/tests/test_expr.py::test_has_multiple", "sympy/core/tests/test_expr.py::test_has_piecewise", "sympy/core/tests/test_expr.py::test_has_iterative", "sympy/core/tests/test_expr.py::test_has_integrals", "sympy/core/tests/test_expr.py::test_has_tuple", 
"sympy/core/tests/test_expr.py::test_has_units", "sympy/core/tests/test_expr.py::test_has_polys", "sympy/core/tests/test_expr.py::test_has_physics", "sympy/core/tests/test_expr.py::test_as_poly_as_expr", "sympy/core/tests/test_expr.py::test_nonzero", "sympy/core/tests/test_expr.py::test_is_number", "sympy/core/tests/test_expr.py::test_as_coeff_add", "sympy/core/tests/test_expr.py::test_as_coeff_mul", "sympy/core/tests/test_expr.py::test_as_coeff_exponent", "sympy/core/tests/test_expr.py::test_extractions", "sympy/core/tests/test_expr.py::test_nan_extractions", "sympy/core/tests/test_expr.py::test_coeff", "sympy/core/tests/test_expr.py::test_coeff2", "sympy/core/tests/test_expr.py::test_coeff2_0", "sympy/core/tests/test_expr.py::test_coeff_expand", "sympy/core/tests/test_expr.py::test_integrate", "sympy/core/tests/test_expr.py::test_as_base_exp", "sympy/core/tests/test_expr.py::test_issue_4963", "sympy/core/tests/test_expr.py::test_action_verbs", "sympy/core/tests/test_expr.py::test_as_powers_dict", "sympy/core/tests/test_expr.py::test_as_coefficients_dict", "sympy/core/tests/test_expr.py::test_args_cnc", "sympy/core/tests/test_expr.py::test_new_rawargs", "sympy/core/tests/test_expr.py::test_issue_5226", "sympy/core/tests/test_expr.py::test_free_symbols", "sympy/core/tests/test_expr.py::test_issue_5300", "sympy/core/tests/test_expr.py::test_as_coeff_Mul", "sympy/core/tests/test_expr.py::test_as_coeff_Add", "sympy/core/tests/test_expr.py::test_expr_sorting", "sympy/core/tests/test_expr.py::test_as_ordered_factors", "sympy/core/tests/test_expr.py::test_as_ordered_terms", "sympy/core/tests/test_expr.py::test_sort_key_atomic_expr", "sympy/core/tests/test_expr.py::test_issue_4199", "sympy/core/tests/test_expr.py::test_eval_interval_zoo", "sympy/core/tests/test_expr.py::test_primitive", "sympy/core/tests/test_expr.py::test_issue_5843", "sympy/core/tests/test_expr.py::test_is_constant", "sympy/core/tests/test_expr.py::test_equals", 
"sympy/core/tests/test_expr.py::test_random", "sympy/core/tests/test_expr.py::test_round", "sympy/core/tests/test_expr.py::test_round_exception_nostr", "sympy/core/tests/test_expr.py::test_extract_branch_factor", "sympy/core/tests/test_expr.py::test_identity_removal", "sympy/core/tests/test_expr.py::test_float_0", "sympy/core/tests/test_expr.py::test_issue_6325", "sympy/core/tests/test_expr.py::test_issue_7426", "sympy/polys/tests/test_polyutils.py::test__nsort", "sympy/polys/tests/test_polyutils.py::test__sort_gens", "sympy/polys/tests/test_polyutils.py::test__unify_gens", "sympy/polys/tests/test_polyutils.py::test__analyze_gens", "sympy/polys/tests/test_polyutils.py::test__sort_factors", "sympy/polys/tests/test_polyutils.py::test__dict_from_expr_if_gens", "sympy/polys/tests/test_polyutils.py::test__dict_from_expr_no_gens", "sympy/polys/tests/test_polyutils.py::test__parallel_dict_from_expr_if_gens", "sympy/polys/tests/test_polyutils.py::test__parallel_dict_from_expr_no_gens", "sympy/polys/tests/test_polyutils.py::test_parallel_dict_from_expr", "sympy/polys/tests/test_polyutils.py::test_dict_from_expr", "sympy/solvers/tests/test_inequalities.py::test_solve_poly_inequality", "sympy/solvers/tests/test_inequalities.py::test_reduce_poly_inequalities_real_interval", "sympy/solvers/tests/test_inequalities.py::test_reduce_poly_inequalities_complex_relational", "sympy/solvers/tests/test_inequalities.py::test_reduce_rational_inequalities_real_relational", "sympy/solvers/tests/test_inequalities.py::test_reduce_abs_inequalities", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_general", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_multivariate", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_errors", "sympy/solvers/tests/test_inequalities.py::test_issue_6343", "sympy/solvers/tests/test_inequalities.py::test_issue_8235", "sympy/solvers/tests/test_inequalities.py::test_solve_univariate_inequality", 
"sympy/solvers/tests/test_inequalities.py::test_issue_9954", "sympy/solvers/tests/test_inequalities.py::test_slow_general_univariate", "sympy/solvers/tests/test_inequalities.py::test_issue_8545", "sympy/solvers/tests/test_inequalities.py::test_issue_8974", "sympy/solvers/tests/test_inequalities.py::test_issue_10047", "sympy/solvers/tests/test_solveset.py::test_invert_real", "sympy/solvers/tests/test_solveset.py::test_invert_complex", "sympy/solvers/tests/test_solveset.py::test_domain_check", "sympy/solvers/tests/test_solveset.py::test_is_function_class_equation", "sympy/solvers/tests/test_solveset.py::test_solve_mul", "sympy/solvers/tests/test_solveset.py::test_solve_invert", "sympy/solvers/tests/test_solveset.py::test_errorinverses", "sympy/solvers/tests/test_solveset.py::test_solve_polynomial", "sympy/solvers/tests/test_solveset.py::test_return_root_of", "sympy/solvers/tests/test_solveset.py::test__has_rational_power", "sympy/solvers/tests/test_solveset.py::test_solveset_sqrt_1", "sympy/solvers/tests/test_solveset.py::test_solveset_sqrt_2", "sympy/solvers/tests/test_solveset.py::test_solve_sqrt_3", "sympy/solvers/tests/test_solveset.py::test_solve_polynomial_symbolic_param", "sympy/solvers/tests/test_solveset.py::test_solve_rational", "sympy/solvers/tests/test_solveset.py::test_solveset_real_gen_is_pow", "sympy/solvers/tests/test_solveset.py::test_no_sol", "sympy/solvers/tests/test_solveset.py::test_sol_zero_real", "sympy/solvers/tests/test_solveset.py::test_no_sol_rational_extragenous", "sympy/solvers/tests/test_solveset.py::test_solve_polynomial_cv_1a", "sympy/solvers/tests/test_solveset.py::test_solveset_real_rational", "sympy/solvers/tests/test_solveset.py::test_solveset_real_log", "sympy/solvers/tests/test_solveset.py::test_poly_gens", "sympy/solvers/tests/test_solveset.py::test_solve_abs", "sympy/solvers/tests/test_solveset.py::test_real_imag_splitting", "sympy/solvers/tests/test_solveset.py::test_units", 
"sympy/solvers/tests/test_solveset.py::test_solve_only_exp_1", "sympy/solvers/tests/test_solveset.py::test_atan2", "sympy/solvers/tests/test_solveset.py::test_piecewise", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_polynomial", "sympy/solvers/tests/test_solveset.py::test_sol_zero_complex", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_rational", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_exp", "sympy/solvers/tests/test_solveset.py::test_solve_complex_log", "sympy/solvers/tests/test_solveset.py::test_solve_complex_sqrt", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_tan", "sympy/solvers/tests/test_solveset.py::test_solve_trig", "sympy/solvers/tests/test_solveset.py::test_solve_invalid_sol", "sympy/solvers/tests/test_solveset.py::test_solve_complex_unsolvable", "sympy/solvers/tests/test_solveset.py::test_conditonset", "sympy/solvers/tests/test_solveset.py::test_solveset_domain", "sympy/solvers/tests/test_solveset.py::test_improve_coverage", "sympy/solvers/tests/test_solveset.py::test_issue_9522", "sympy/solvers/tests/test_solveset.py::test_linear_eq_to_matrix", "sympy/solvers/tests/test_solveset.py::test_linsolve", "sympy/solvers/tests/test_solveset.py::test_issue_9556", "sympy/solvers/tests/test_solveset.py::test_issue_9611", "sympy/solvers/tests/test_solveset.py::test_issue_9557", "sympy/solvers/tests/test_solveset.py::test_issue_9778", "sympy/solvers/tests/test_solveset.py::test_issue_9849", "sympy/solvers/tests/test_solveset.py::test_issue_9953", "sympy/solvers/tests/test_solveset.py::test_issue_9913" ]
[]
BSD
325
typesafehub__conductr-cli-90
5947a412f5c61a10f5e5f0aa2da85cac2c66faca
2015-12-08 23:44:42
1df5ea26ef321f9aca5cb5cab60c4388baab0ffc
diff --git a/conductr_cli/bundle_installation.py b/conductr_cli/bundle_installation.py index 885499a..a97e64a 100644 --- a/conductr_cli/bundle_installation.py +++ b/conductr_cli/bundle_installation.py @@ -21,29 +21,45 @@ def count_installations(bundle_id, args): return 0 +def wait_for_uninstallation(bundle_id, args): + return wait_for_condition(bundle_id, is_uninstalled, 'uninstalled', args) + + def wait_for_installation(bundle_id, args): + return wait_for_condition(bundle_id, is_installed, 'installed', args) + + +def wait_for_condition(bundle_id, condition, condition_name, args): log = logging.getLogger(__name__) start_time = datetime.now() installed_bundles = count_installations(bundle_id, args) - if installed_bundles > 0: - log.info('Bundle {} is installed'.format(bundle_id)) + if condition(installed_bundles): + log.info('Bundle {} is {}'.format(bundle_id, condition_name)) return else: - log.info('Bundle {} waiting to be installed'.format(bundle_id)) + log.info('Bundle {} waiting to be {}'.format(bundle_id, condition_name)) bundle_events_url = conduct_url.url('bundles/events', args) sse_events = sse_client.get_events(bundle_events_url) for event in sse_events: elapsed = (datetime.now() - start_time).total_seconds() if elapsed > args.wait_timeout: - raise WaitTimeoutError('Bundle {} waiting to be installed'.format(bundle_id)) + raise WaitTimeoutError('Bundle {} waiting to be {}'.format(bundle_id, condition_name)) if event.event and event.event.startswith('bundleInstallation'): installed_bundles = count_installations(bundle_id, args) - if installed_bundles > 0: - log.info('Bundle {} installed'.format(bundle_id)) + if condition(installed_bundles): + log.info('Bundle {} {}'.format(bundle_id, condition_name)) return else: - log.info('Bundle {} still waiting to be installed'.format(bundle_id)) + log.info('Bundle {} still waiting to be {}'.format(bundle_id, condition_name)) + + raise WaitTimeoutError('Bundle {} still waiting to be {}'.format(bundle_id, condition_name)) 
+ + +def is_installed(number_of_installations): + return number_of_installations > 0 + - raise WaitTimeoutError('Bundle {} still waiting to be installed'.format(bundle_id)) +def is_uninstalled(number_of_installations): + return number_of_installations <= 0 diff --git a/conductr_cli/conduct.py b/conductr_cli/conduct.py index 7b8c042..1e6f945 100755 --- a/conductr_cli/conduct.py +++ b/conductr_cli/conduct.py @@ -205,6 +205,8 @@ def build_parser(): unload_parser.add_argument('bundle', help='The ID of the bundle') add_default_arguments(unload_parser) + add_wait_timeout(unload_parser) + add_no_wait(unload_parser) unload_parser.set_defaults(func=conduct_unload.unload) # Sub-parser for `events` sub-command diff --git a/conductr_cli/conduct_unload.py b/conductr_cli/conduct_unload.py index 5e1f47a..24f7466 100644 --- a/conductr_cli/conduct_unload.py +++ b/conductr_cli/conduct_unload.py @@ -1,4 +1,5 @@ -from conductr_cli import conduct_url, validation +from conductr_cli import conduct_url, validation, bundle_installation +import json import logging import requests from conductr_cli.http import DEFAULT_HTTP_TIMEOUT @@ -19,6 +20,14 @@ def unload(args): log.verbose(validation.pretty_json(response.text)) log.info('Bundle unload request sent.') + + response_json = json.loads(response.text) + if not args.no_wait: + bundle_installation.wait_for_uninstallation(response_json['bundleId'], args) + log.info('Print ConductR info with: conduct info{}'.format(args.cli_parameters)) + if not log.is_info_enabled() and log.is_quiet_enabled(): + log.quiet(response_json['bundleId']) + return True
Introduce wait behaviour for conduct unload command The `conduct load`, `conduct run`, and `conduct stop` has its default behaviour (to wait) and `--no-wait` option. Implement the same behaviour for `conduct unload` command to allow scripting of the stop and uninstallation of bundles within ConductR
typesafehub/conductr-cli
diff --git a/conductr_cli/test/test_bundle_installation.py b/conductr_cli/test/test_bundle_installation.py index 567b81a..eb32335 100644 --- a/conductr_cli/test/test_bundle_installation.py +++ b/conductr_cli/test/test_bundle_installation.py @@ -8,8 +8,14 @@ except ImportError: from mock import call, patch, MagicMock +def create_test_event(event_name): + sse_mock = MagicMock() + sse_mock.event = event_name + return sse_mock + + class TestCountInstallation(CliTestCase): - def test_return_scale(self): + def test_return_installation_count(self): bundles_endpoint_reply = """ [{ "bundleId": "a101449418187d92c789d1adc240b6d6", @@ -36,7 +42,7 @@ class TestCountInstallation(CliTestCase): http_method.assert_called_with('http://127.0.0.1:9005/bundles') - def test_return_scale_v2(self): + def test_return_installation_count_v2(self): bundles_endpoint_reply = """ [{ "bundleId": "a101449418187d92c789d1adc240b6d6", @@ -63,7 +69,7 @@ class TestCountInstallation(CliTestCase): http_method.assert_called_with('http://127.0.0.1:9005/v2/bundles') - def test_return_zero_v1(self): + def test_return_zero_installation_count_v1(self): bundles_endpoint_reply = '[]' http_method = self.respond_with(text=bundles_endpoint_reply) @@ -79,7 +85,7 @@ class TestCountInstallation(CliTestCase): http_method.assert_called_with('http://127.0.0.1:9005/bundles') - def test_return_zero_v2(self): + def test_return_zero_installation_count_v2(self): bundles_endpoint_reply = '[]' http_method = self.respond_with(text=bundles_endpoint_reply) @@ -96,14 +102,14 @@ class TestCountInstallation(CliTestCase): http_method.assert_called_with('http://127.0.0.1:9005/v2/bundles') -class TestWaitForScale(CliTestCase): +class TestWaitForInstallation(CliTestCase): def test_wait_for_installation(self): count_installations_mock = MagicMock(side_effect=[0, 1]) url_mock = MagicMock(return_value='/bundle-events/endpoint') get_events_mock = MagicMock(return_value=[ - self.create_test_event(None), - 
self.create_test_event('bundleInstallationAdded'), - self.create_test_event('bundleInstallationAdded') + create_test_event(None), + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded') ]) stdout = MagicMock() @@ -124,7 +130,6 @@ class TestWaitForScale(CliTestCase): ]) url_mock.assert_called_with('bundles/events', args) - self.maxDiff = None self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be installed |Bundle a101449418187d92c789d1adc240b6d6 installed @@ -154,9 +159,9 @@ class TestWaitForScale(CliTestCase): count_installations_mock = MagicMock(side_effect=[0, 1, 1]) url_mock = MagicMock(return_value='/bundle-events/endpoint') get_events_mock = MagicMock(return_value=[ - self.create_test_event('bundleExecutionAdded'), - self.create_test_event('bundleExecutionAdded'), - self.create_test_event('bundleExecutionAdded') + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded') ]) stdout = MagicMock() @@ -185,9 +190,9 @@ class TestWaitForScale(CliTestCase): count_installations_mock = MagicMock(return_value=0) url_mock = MagicMock(return_value='/bundle-events/endpoint') get_events_mock = MagicMock(return_value=[ - self.create_test_event('bundleInstallationAdded'), - self.create_test_event('bundleInstallationAdded'), - self.create_test_event('bundleInstallationAdded') + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded') ]) stdout = MagicMock() @@ -217,7 +222,123 @@ class TestWaitForScale(CliTestCase): |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be installed |"""), self.output(stdout)) - def create_test_event(self, event_name): - sse_mock = MagicMock() - sse_mock.event = event_name - return sse_mock + +class TestWaitForUninstallation(CliTestCase): + def test_wait_for_uninstallation(self): + count_installations_mock 
= MagicMock(side_effect=[1, 0]) + url_mock = MagicMock(return_value='/bundle-events/endpoint') + get_events_mock = MagicMock(return_value=[ + create_test_event(None), + create_test_event('bundleInstallationRemoved'), + create_test_event('bundleInstallationRemoved') + ]) + + stdout = MagicMock() + + bundle_id = 'a101449418187d92c789d1adc240b6d6' + args = MagicMock(**{ + 'wait_timeout': 10 + }) + with patch('conductr_cli.conduct_url.url', url_mock), \ + patch('conductr_cli.bundle_installation.count_installations', count_installations_mock), \ + patch('conductr_cli.sse_client.get_events', get_events_mock): + logging_setup.configure_logging(args, stdout) + bundle_installation.wait_for_uninstallation(bundle_id, args) + + self.assertEqual(count_installations_mock.call_args_list, [ + call(bundle_id, args), + call(bundle_id, args) + ]) + + url_mock.assert_called_with('bundles/events', args) + + self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be uninstalled + |Bundle a101449418187d92c789d1adc240b6d6 uninstalled + |"""), self.output(stdout)) + + def test_return_immediately_if_uninstalled(self): + count_installations_mock = MagicMock(side_effect=[0]) + + stdout = MagicMock() + + bundle_id = 'a101449418187d92c789d1adc240b6d6' + args = MagicMock(**{ + 'wait_timeout': 10 + }) + with patch('conductr_cli.bundle_installation.count_installations', count_installations_mock): + logging_setup.configure_logging(args, stdout) + bundle_installation.wait_for_uninstallation(bundle_id, args) + + self.assertEqual(count_installations_mock.call_args_list, [ + call(bundle_id, args) + ]) + + self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 is uninstalled + |"""), self.output(stdout)) + + def test_wait_timeout(self): + count_installations_mock = MagicMock(side_effect=[1, 1, 1]) + url_mock = MagicMock(return_value='/bundle-events/endpoint') + get_events_mock = MagicMock(return_value=[ + create_test_event('bundleInstallationAdded'), + 
create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded') + ]) + + stdout = MagicMock() + + bundle_id = 'a101449418187d92c789d1adc240b6d6' + args = MagicMock(**{ + # Purposely set no timeout to invoke the error + 'wait_timeout': 0 + }) + with patch('conductr_cli.conduct_url.url', url_mock), \ + patch('conductr_cli.bundle_installation.count_installations', count_installations_mock), \ + patch('conductr_cli.sse_client.get_events', get_events_mock): + logging_setup.configure_logging(args, stdout) + self.assertRaises(WaitTimeoutError, bundle_installation.wait_for_uninstallation, bundle_id, args) + + self.assertEqual(count_installations_mock.call_args_list, [ + call(bundle_id, args) + ]) + + url_mock.assert_called_with('bundles/events', args) + + self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be uninstalled + |"""), self.output(stdout)) + + def test_wait_timeout_all_events(self): + count_installations_mock = MagicMock(return_value=1) + url_mock = MagicMock(return_value='/bundle-events/endpoint') + get_events_mock = MagicMock(return_value=[ + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded'), + create_test_event('bundleInstallationAdded') + ]) + + stdout = MagicMock() + + bundle_id = 'a101449418187d92c789d1adc240b6d6' + args = MagicMock(**{ + 'wait_timeout': 10 + }) + with patch('conductr_cli.conduct_url.url', url_mock), \ + patch('conductr_cli.bundle_installation.count_installations', count_installations_mock), \ + patch('conductr_cli.sse_client.get_events', get_events_mock): + logging_setup.configure_logging(args, stdout) + self.assertRaises(WaitTimeoutError, bundle_installation.wait_for_uninstallation, bundle_id, args) + + self.assertEqual(count_installations_mock.call_args_list, [ + call(bundle_id, args), + call(bundle_id, args), + call(bundle_id, args), + call(bundle_id, args) + ]) + + url_mock.assert_called_with('bundles/events', args) + + 
self.assertEqual(strip_margin("""|Bundle a101449418187d92c789d1adc240b6d6 waiting to be uninstalled + |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be uninstalled + |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be uninstalled + |Bundle a101449418187d92c789d1adc240b6d6 still waiting to be uninstalled + |"""), self.output(stdout)) diff --git a/conductr_cli/test/test_conduct.py b/conductr_cli/test/test_conduct.py index f0c2141..15df94d 100644 --- a/conductr_cli/test/test_conduct.py +++ b/conductr_cli/test/test_conduct.py @@ -60,6 +60,8 @@ class TestConduct(TestCase): self.assertEqual(args.resolve_cache_dir, '{}/.conductr/cache'.format(os.path.expanduser('~'))) self.assertEqual(args.verbose, False) self.assertEqual(args.long_ids, False) + self.assertEqual(args.no_wait, False) + self.assertEqual(args.wait_timeout, 60) self.assertEqual(args.bundle, 'path-to-bundle') self.assertEqual(args.configuration, 'path-to-conf') @@ -74,6 +76,8 @@ class TestConduct(TestCase): self.assertEqual(args.resolve_cache_dir, '/somewhere') self.assertEqual(args.verbose, False) self.assertEqual(args.long_ids, False) + self.assertEqual(args.no_wait, False) + self.assertEqual(args.wait_timeout, 60) self.assertEqual(args.bundle, 'path-to-bundle') self.assertEqual(args.configuration, 'path-to-conf') self.assertFalse(args.quiet) @@ -88,6 +92,8 @@ class TestConduct(TestCase): self.assertEqual(args.cli_settings_dir, '{}/.conductr'.format(os.path.expanduser('~'))) self.assertEqual(args.verbose, False) self.assertEqual(args.long_ids, False) + self.assertEqual(args.no_wait, False) + self.assertEqual(args.wait_timeout, 60) self.assertEqual(args.scale, 5) self.assertEqual(args.bundle, 'path-to-bundle') @@ -101,6 +107,8 @@ class TestConduct(TestCase): self.assertEqual(args.cli_settings_dir, '{}/.conductr'.format(os.path.expanduser('~'))) self.assertEqual(args.verbose, False) self.assertEqual(args.long_ids, False) + self.assertEqual(args.no_wait, False) + 
self.assertEqual(args.wait_timeout, 60) self.assertEqual(args.bundle, 'path-to-bundle') def test_parser_unload(self): @@ -113,6 +121,8 @@ class TestConduct(TestCase): self.assertEqual(args.cli_settings_dir, '{}/.conductr'.format(os.path.expanduser('~'))) self.assertEqual(args.verbose, False) self.assertEqual(args.long_ids, False) + self.assertEqual(args.no_wait, False) + self.assertEqual(args.wait_timeout, 60) self.assertEqual(args.bundle, 'path-to-bundle') def test_get_cli_parameters(self): diff --git a/conductr_cli/test/test_conduct_unload.py b/conductr_cli/test/test_conduct_unload.py index b2adc98..92f69d4 100644 --- a/conductr_cli/test/test_conduct_unload.py +++ b/conductr_cli/test/test_conduct_unload.py @@ -22,6 +22,7 @@ class TestConductUnloadCommand(CliTestCase): 'port': 9005, 'api_version': '1', 'verbose': False, + 'no_wait': False, 'quiet': False, 'cli_parameters': '', 'bundle': '45e0c477d3e5ea92aa8d85c0d8f3e25c' @@ -37,51 +38,101 @@ class TestConductUnloadCommand(CliTestCase): return strip_margin(self.output_template.format(**{'params': params})) def test_success(self): + wait_for_uninstallation_mock = MagicMock() http_method = self.respond_with(200, self.default_response) stdout = MagicMock() - with patch('requests.delete', http_method): - logging_setup.configure_logging(MagicMock(**self.default_args), stdout) - result = conduct_unload.unload(MagicMock(**self.default_args)) + input_args = MagicMock(**self.default_args) + with patch('requests.delete', http_method), \ + patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock): + logging_setup.configure_logging(input_args, stdout) + result = conduct_unload.unload(input_args) self.assertTrue(result) http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT) + wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args) self.assertEqual(self.default_output(), self.output(stdout)) def test_success_verbose(self): + 
wait_for_uninstallation_mock = MagicMock() http_method = self.respond_with(200, self.default_response) stdout = MagicMock() - with patch('requests.delete', http_method), patch('sys.stdout', stdout): - args = self.default_args.copy() - args.update({'verbose': True}) - logging_setup.configure_logging(MagicMock(**args), stdout) - result = conduct_unload.unload(MagicMock(**args)) + args = self.default_args.copy() + args.update({'verbose': True}) + input_args = MagicMock(**args) + + with patch('requests.delete', http_method), \ + patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock): + logging_setup.configure_logging(input_args, stdout) + result = conduct_unload.unload(input_args) self.assertTrue(result) http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT) + wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args) self.assertEqual(self.default_response + self.default_output(), self.output(stdout)) + def test_success_quiet(self): + wait_for_uninstallation_mock = MagicMock() + http_method = self.respond_with(200, self.default_response) + stdout = MagicMock() + + args = self.default_args.copy() + args.update({'quiet': True}) + input_args = MagicMock(**args) + + with patch('requests.delete', http_method), \ + patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock): + logging_setup.configure_logging(input_args, stdout) + result = conduct_unload.unload(input_args) + self.assertTrue(result) + + http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT) + wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args) + + self.assertEqual('45e0c477d3e5ea92aa8d85c0d8f3e25c\n', self.output(stdout)) + def test_success_with_configuration(self): + wait_for_uninstallation_mock = MagicMock() http_method = self.respond_with(200, self.default_response) stdout = MagicMock() + args = 
self.default_args.copy() cli_parameters = ' --ip 127.0.1.1 --port 9006' - with patch('requests.delete', http_method): - args = self.default_args.copy() - args.update({'cli_parameters': cli_parameters}) - logging_setup.configure_logging(MagicMock(**args), stdout) - result = conduct_unload.unload(MagicMock(**args)) + args.update({'cli_parameters': cli_parameters}) + input_args = MagicMock(**args) + + with patch('requests.delete', http_method), \ + patch('conductr_cli.bundle_installation.wait_for_uninstallation', wait_for_uninstallation_mock): + logging_setup.configure_logging(input_args, stdout) + result = conduct_unload.unload(input_args) self.assertTrue(result) http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT) + wait_for_uninstallation_mock.assert_called_with('45e0c477d3e5ea92aa8d85c0d8f3e25c', input_args) self.assertEqual( self.default_output(params=cli_parameters), self.output(stdout)) + def test_success_no_wait(self): + http_method = self.respond_with(200, self.default_response) + stdout = MagicMock() + + args = self.default_args.copy() + args.update({'no_wait': True}) + input_args = MagicMock(**args) + with patch('requests.delete', http_method): + logging_setup.configure_logging(input_args, stdout) + result = conduct_unload.unload(input_args) + self.assertTrue(result) + + http_method.assert_called_with(self.default_url, timeout=DEFAULT_HTTP_TIMEOUT) + + self.assertEqual(self.default_output(), self.output(stdout)) + def test_failure(self): http_method = self.respond_with(404) stderr = MagicMock()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
0.21
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "flake8", "pep8-naming", "flake8-quotes" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
argcomplete==3.6.1 arrow==1.3.0 certifi==2025.1.31 charset-normalizer==3.4.1 -e git+https://github.com/typesafehub/conductr-cli.git@5947a412f5c61a10f5e5f0aa2da85cac2c66faca#egg=conductr_cli exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work flake8==7.2.0 flake8-quotes==3.4.0 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mccabe==0.7.0 packaging @ file:///croot/packaging_1734472117206/work pep8-naming==0.14.1 pluggy @ file:///croot/pluggy_1733169602837/work pycodestyle==2.13.0 pyflakes==3.3.1 pyhocon==0.2.1 pyparsing==2.0.3 pytest @ file:///croot/pytest_1738938843180/work python-dateutil==2.9.0.post0 requests==2.32.3 six==1.17.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work types-python-dateutil==2.9.0.20241206 urllib3==2.3.0
name: conductr-cli channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argcomplete==3.6.1 - arrow==1.3.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - flake8==7.2.0 - flake8-quotes==3.4.0 - idna==3.10 - mccabe==0.7.0 - pep8-naming==0.14.1 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pyhocon==0.2.1 - pyparsing==2.0.3 - python-dateutil==2.9.0.post0 - requests==2.32.3 - six==1.17.0 - types-python-dateutil==2.9.0.20241206 - urllib3==2.3.0 prefix: /opt/conda/envs/conductr-cli
[ "conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_return_immediately_if_uninstalled", "conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_wait_for_uninstallation", "conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_wait_timeout", "conductr_cli/test/test_bundle_installation.py::TestWaitForUninstallation::test_wait_timeout_all_events", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_unload", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_quiet", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_verbose", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_with_configuration" ]
[]
[ "conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_installation_count", "conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_installation_count_v2", "conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_zero_installation_count_v1", "conductr_cli/test/test_bundle_installation.py::TestCountInstallation::test_return_zero_installation_count_v2", "conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_return_immediately_if_installed", "conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_wait_for_installation", "conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_wait_timeout", "conductr_cli/test/test_bundle_installation.py::TestWaitForInstallation::test_wait_timeout_all_events", "conductr_cli/test/test_conduct.py::TestConduct::test_default", "conductr_cli/test/test_conduct.py::TestConduct::test_get_cli_parameters", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_info", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_load", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_load_with_custom_resolve_cache_dir", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_run", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_services", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_stop", "conductr_cli/test/test_conduct.py::TestConduct::test_parser_version", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_failure", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_failure_invalid_address", "conductr_cli/test/test_conduct_unload.py::TestConductUnloadCommand::test_success_no_wait" ]
[]
Apache License 2.0
326
mapbox__mapbox-sdk-py-91
c45e856d0640c6d9b1750446fed58cb882912edb
2015-12-09 15:26:44
c45e856d0640c6d9b1750446fed58cb882912edb
diff --git a/docs/mapmatching.md b/docs/mapmatching.md new file mode 100644 index 0000000..f404714 --- /dev/null +++ b/docs/mapmatching.md @@ -0,0 +1,89 @@ +# Map Matching + +The `MapMatcher` class from the `mapbox.services.mapmatching` module provides +access to the Mapbox Map Matching API. You can also import it directly from the +`mapbox` module. + +```python +>>> from mapbox import MapMatcher + +``` + +See https://www.mapbox.com/developers/api/map-matching/ for general documentation +of the API. + +Your Mapbox access token should be set in your environment; see the [access +tokens](access_tokens.md) documentation for more information. + +## MapMatcher methods + +The methods of the `MapMatcher` class return an instance of +[`requests.Response`](http://docs.python-requests.org/en/latest/api/#requests.Response). + +In addition to the `json()` method that returns Python data parsed from the +API, the responses provide a `geojson()` method that converts that +data to a GeoJSON like form. + +## Usage + +The Mapbox Map Matching API lets you take recorded GPS traces and snap them to the OpenStreetMap road and path network. This is helpful for aligning noisy traces and displaying them cleanly on a map. + +The Map Matching API is limited to 60 requests per minute and results must be displayed on a Mapbox map using one of our SDKs. For high volume or other use cases, contact us. + + +```python +>>> service = MapMatcher() + +``` + +The input data to the Map Matcher must be a single GeoJSON-like Feature with a LineString geometry. +The optional `coordTimes` property should be an array of the same length as the coordinates +containing timestamps to help make the matching more accurate. + +``` +>>> line = { +... "type": "Feature", +... "properties": { +... "coordTimes": [ +... "2015-04-21T06:00:00Z", +... "2015-04-21T06:00:05Z", +... "2015-04-21T06:00:10Z", +... "2015-04-21T06:00:15Z", +... "2015-04-21T06:00:20Z"]}, +... "geometry": { +... "type": "LineString", +... 
"coordinates": [ +... [13.418946862220764, 52.50055852688439], +... [13.419011235237122, 52.50113000479732], +... [13.419756889343262, 52.50171780290061], +... [13.419885635375975, 52.50237416816131], +... [13.420631289482117, 52.50294888790448]]}} + +``` + +Use the `surface()` method to query the terrain dataset. + +```python +>>> response = service.match(line, profile='mapbox.driving') +>>> response.status_code +200 +>>> response.headers['Content-Type'] +'application/json; charset=utf-8' + +``` + +The response geojson contains a FeatureCollection with a single feature, +with the new LineString corrected to match segments from the selected profile. + +```python +>>> corrected = response.geojson()['features'][0] +>>> corrected['geometry']['type'] +'LineString' +>>> corrected['geometry'] == line['geometry'] +False +>>> len(corrected['geometry']) == len(line['geometry']) +True + +``` + +See ``import mapbox; help(mapbox.MapMatcher)`` for more detailed usage. diff --git a/mapbox/__init__.py b/mapbox/__init__.py index e4d15aa..0c25b2b 100644 --- a/mapbox/__init__.py +++ b/mapbox/__init__.py @@ -5,6 +5,7 @@ __version__ = "0.5.0" from .services.directions import Directions from .services.distance import Distance from .services.geocoding import Geocoder, InvalidPlaceTypeError +from .services.mapmatching import MapMatcher from .services.surface import Surface -from .services.uploads import Uploader from .services.static import Static +from .services.uploads import Uploader diff --git a/mapbox/services/mapmatching.py b/mapbox/services/mapmatching.py new file mode 100644 index 0000000..3dd5d92 --- /dev/null +++ b/mapbox/services/mapmatching.py @@ -0,0 +1,37 @@ +import json + +from uritemplate import URITemplate + +from mapbox.services.base import Service + + +class MapMatcher(Service): + + def __init__(self, access_token=None): + self.baseuri = 'https://api.mapbox.com/matching/v4' + self.session = self.get_session(access_token) + + def _validate_profile(self, profile): + 
valid_profiles = ['mapbox.driving', 'mapbox.cycling', 'mapbox.walking'] + if profile not in valid_profiles: + raise ValueError("{} is not a valid profile".format(profile)) + return profile + + def match(self, feature, profile='mapbox.driving'): + profile = self._validate_profile(profile) + # todo validate single feature with linestring geometry up to 100 pts + geojson_line_feature = json.dumps(feature) + + uri = URITemplate('%s/{profile}.json' % self.baseuri).expand( + profile=profile) + + res = self.session.post(uri, data=geojson_line_feature, + headers={'Content-Type': 'application/json'}) + self.handle_http_error(res) + + def geojson(): + return res.json() + + res.geojson = geojson + + return res
Map Matching API
mapbox/mapbox-sdk-py
diff --git a/tests/test_mapmatching.py b/tests/test_mapmatching.py new file mode 100644 index 0000000..2277866 --- /dev/null +++ b/tests/test_mapmatching.py @@ -0,0 +1,47 @@ +import pytest +import responses + +import mapbox + [email protected] +def line_feature(): + return { + "type": "Feature", + "properties": { + "coordTimes": [ + "2015-04-21T06:00:00Z", + "2015-04-21T06:00:05Z", + "2015-04-21T06:00:10Z", + "2015-04-21T06:00:15Z", + "2015-04-21T06:00:20Z"]}, + "geometry": { + "type": "LineString", + "coordinates": [ + [13.418946862220764, 52.50055852688439], + [13.419011235237122, 52.50113000479732], + [13.419756889343262, 52.50171780290061], + [13.419885635375975, 52.50237416816131], + [13.420631289482117, 52.50294888790448]]}} + + [email protected] +def test_matching(line_feature): + + body = '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{"confidence":0.8165504318718629,"matchedPoints":[[13.418805122375488,52.5005989074707],[13.419145584106445,52.501094818115234],[13.419618606567383,52.50175094604492],[13.420042037963867,52.50233459472656],[13.420494079589844,52.50298309326172]],"indices":[0,1,2,3,4]},"geometry":{"type":"LineString","coordinates":[[13.418805,52.500599],[13.418851,52.500659],[13.419121,52.501057],[13.419146,52.501095],[13.419276,52.501286],[13.419446,52.501518],[13.419619,52.501753],[13.419981,52.502249],[13.420042,52.502335],[13.420494,52.502984]]}}]}' + + responses.add( + responses.POST, + 'https://api.mapbox.com/matching/v4/mapbox.driving.json?access_token=pk.test', + match_querystring=True, + body=body, status=200, + content_type='application/json') + + service = mapbox.MapMatcher(access_token='pk.test') + res = service.match(line_feature) + assert res.status_code == 200 + + +def test_invalid_profile(line_feature): + service = mapbox.MapMatcher(access_token='pk.test') + with pytest.raises(ValueError): + service.match(line_feature, profile="covered_wagon")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[test]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work boto3==1.23.10 botocore==1.26.10 certifi==2021.5.30 charset-normalizer==2.0.12 click==8.0.4 click-plugins==1.1.1 cligj==0.7.2 coverage==6.2 coveralls==3.3.1 distlib==0.3.9 docopt==0.6.2 filelock==3.4.1 idna==3.10 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work jmespath==0.10.0 -e git+https://github.com/mapbox/mapbox-sdk-py.git@c45e856d0640c6d9b1750446fed58cb882912edb#egg=mapbox more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work platformdirs==2.4.0 pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 python-dateutil==2.9.0.post0 requests==2.27.1 responses==0.17.0 s3transfer==0.5.2 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 tox==3.28.0 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work uritemplate==4.1.1 uritemplate.py==3.0.2 urllib3==1.26.20 virtualenv==20.17.1 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: mapbox-sdk-py channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - boto3==1.23.10 - botocore==1.26.10 - charset-normalizer==2.0.12 - click==8.0.4 - click-plugins==1.1.1 - cligj==0.7.2 - coverage==6.2 - coveralls==3.3.1 - distlib==0.3.9 - docopt==0.6.2 - filelock==3.4.1 - idna==3.10 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - jmespath==0.10.0 - platformdirs==2.4.0 - pytest-cov==4.0.0 - python-dateutil==2.9.0.post0 - requests==2.27.1 - responses==0.17.0 - s3transfer==0.5.2 - six==1.17.0 - tomli==1.2.3 - tox==3.28.0 - uritemplate==4.1.1 - uritemplate-py==3.0.2 - urllib3==1.26.20 - virtualenv==20.17.1 prefix: /opt/conda/envs/mapbox-sdk-py
[ "tests/test_mapmatching.py::test_matching", "tests/test_mapmatching.py::test_invalid_profile" ]
[]
[]
[]
MIT License
327
joblib__joblib-283
c60d263fcc71ba9f4532010b732cde42e437039b
2015-12-10 15:14:21
40341615cc2600675ce7457d9128fb030f6f89fa
lesteve: > inspect.getfullargspec is marked as deprecated since 3.5, why not directly use a compatibility function called signature and use the one from python >= 3.4. Yeah I am aware of that but I wanted to do the smallest change to make joblib.Memory support functions with signature or keyword-only arguments. scikit-learn went the way you mentioned and ended up having to backport OrderedDict in order to support python 2.6. aabadie: @lesteve, to me your change is an improvement and I'm ok to merge it. But if you have time and think it's worth reusing the scikit-learn strategy, then go ahead ;) lesteve: > @lesteve, to me your change is an improvement and I'm ok to merge it. But if you have time and think it's worth reusing the scikit-learn strategy, then go ahead ;) I am enclined to go with this simpler strategy for now. The only thing I want to look at is tackling this comment from above: > At the moment I am not doing any checks to see whether the keyword-only arguments were indeed passed as keywords. Maybe I should since there is some checks for the number of arguments in this function already. lesteve: Right, I fixed the keyword-only argument passed as positional parameter in joblib.function_inspect.filter_args. As you would expect, the snippet above now raises ``` ValueError: Keyword-only parameter 'kw1' was passed as positional parameter for func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): func_with_kwonly_args(1, 2, 3, kw2=4) was called. ``` Any more comments ? aabadie: @lesteve, @ogrisel, could this one be merged ? ogrisel: Besides minor comments, LGTM. I can do the cosmetics changed when merging if you wish.
diff --git a/joblib/func_inspect.py b/joblib/func_inspect.py index cc5cbf6..3eee40b 100644 --- a/joblib/func_inspect.py +++ b/joblib/func_inspect.py @@ -11,11 +11,15 @@ import inspect import warnings import re import os - +import sys from ._compat import _basestring from .logger import pformat from ._memory_helpers import open_py_source + +PY3 = sys.version_info[0] >= 3 + + def get_func_code(func): """ Attempts to retrieve a reliable function code hash. @@ -156,6 +160,53 @@ def get_func_name(func, resolv_alias=True, win_characters=True): return module, name +def getfullargspec(func): + """Compatibility function to provide inspect.getfullargspec in Python 2 + + This should be rewritten using a backport of Python 3 signature + once we drop support for Python 2.6. We went for a simpler + approach at the time of writing because signature uses OrderedDict + which is not available in Python 2.6. + """ + try: + return inspect.getfullargspec(func) + except AttributeError: + arg_spec = inspect.getargspec(func) + import collections + tuple_fields = ('args varargs varkw defaults kwonlyargs ' + 'kwonlydefaults annotations') + tuple_type = collections.namedtuple('FullArgSpec', tuple_fields) + + return tuple_type(args=arg_spec.args, + varargs=arg_spec.varargs, + varkw=arg_spec.keywords, + defaults=arg_spec.defaults, + kwonlyargs=[], + kwonlydefaults=None, + annotations={}) + + +def _signature_str(function_name, arg_spec): + """Helper function to output a function signature""" + # inspect.formatargspec can not deal with the same + # number of arguments in python 2 and 3 + arg_spec_for_format = arg_spec[:7 if PY3 else 4] + + arg_spec_str = inspect.formatargspec(*arg_spec_for_format) + return '{0}{1}'.format(function_name, arg_spec_str) + + +def _function_called_str(function_name, args, kwargs): + """Helper function to output a function call""" + template_str = '{0}({1}, {2})' + + args_str = repr(args)[1:-1] + kwargs_str = ', '.join('%s=%s' % (k, v) + for k, v in kwargs.items()) + 
return template_str.format(function_name, args_str, + kwargs_str) + + def filter_args(func, ignore_lst, args=(), kwargs=dict()): """ Filters the given args and kwargs using a list of arguments to ignore, and a function specification. @@ -180,19 +231,22 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()): args = list(args) if isinstance(ignore_lst, _basestring): # Catch a common mistake - raise ValueError('ignore_lst must be a list of parameters to ignore ' + raise ValueError( + 'ignore_lst must be a list of parameters to ignore ' '%s (type %s) was given' % (ignore_lst, type(ignore_lst))) # Special case for functools.partial objects if (not inspect.ismethod(func) and not inspect.isfunction(func)): if ignore_lst: warnings.warn('Cannot inspect object %s, ignore list will ' - 'not work.' % func, stacklevel=2) + 'not work.' % func, stacklevel=2) return {'*': args, '**': kwargs} - arg_spec = inspect.getargspec(func) - arg_names = arg_spec.args - arg_defaults = arg_spec.defaults or {} - arg_keywords = arg_spec.keywords + arg_spec = getfullargspec(func) + arg_names = arg_spec.args + arg_spec.kwonlyargs + arg_defaults = arg_spec.defaults or () + arg_defaults = arg_defaults + tuple(arg_spec.kwonlydefaults[k] + for k in arg_spec.kwonlyargs) arg_varargs = arg_spec.varargs + arg_varkw = arg_spec.varkw if inspect.ismethod(func): # First argument is 'self', it has been removed by Python @@ -207,7 +261,18 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()): for arg_position, arg_name in enumerate(arg_names): if arg_position < len(args): # Positional argument or keyword argument given as positional - arg_dict[arg_name] = args[arg_position] + if arg_name not in arg_spec.kwonlyargs: + arg_dict[arg_name] = args[arg_position] + else: + raise ValueError( + "Keyword-only parameter '%s' was passed as " + 'positional parameter for %s:\n' + ' %s was called.' 
+ % (arg_name, + _signature_str(name, arg_spec), + _function_called_str(name, args, kwargs)) + ) + else: position = arg_position - len(arg_names) if arg_name in kwargs: @@ -217,28 +282,24 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()): arg_dict[arg_name] = arg_defaults[position] except (IndexError, KeyError): # Missing argument - raise ValueError('Wrong number of arguments for %s%s:\n' - ' %s(%s, %s) was called.' - % (name, - inspect.formatargspec(*inspect.getargspec(func)), - name, - repr(args)[1:-1], - ', '.join('%s=%s' % (k, v) - for k, v in kwargs.items()) - ) - ) + raise ValueError( + 'Wrong number of arguments for %s:\n' + ' %s was called.' + % (_signature_str(name, arg_spec), + _function_called_str(name, args, kwargs)) + ) varkwargs = dict() for arg_name, arg_value in sorted(kwargs.items()): if arg_name in arg_dict: arg_dict[arg_name] = arg_value - elif arg_keywords is not None: + elif arg_varkw is not None: varkwargs[arg_name] = arg_value else: raise TypeError("Ignore list for %s() contains an unexpected " "keyword argument '%s'" % (name, arg_name)) - if arg_keywords is not None: + if arg_varkw is not None: arg_dict['**'] = varkwargs if arg_varargs is not None: varargs = args[arg_position + 1:] @@ -250,13 +311,10 @@ def filter_args(func, ignore_lst, args=(), kwargs=dict()): arg_dict.pop(item) else: raise ValueError("Ignore list: argument '%s' is not defined for " - "function %s%s" % - (item, name, - inspect.formatargspec(arg_names, - arg_varargs, - arg_keywords, - arg_defaults, - ))) + "function %s" + % (item, + _signature_str(name, arg_spec)) + ) # XXX: Return a sorted list of pairs? return arg_dict
Replace deprecated usage of inspect.getargspec inspect.getargspec has been deprecated since 3.0 (in favor of inspect.getfullargspec until 3.2 and in favor of inspect.signature since 3.3) and will be removed in 3.6. It also creates visible DeprecationWarning in 3.5.
joblib/joblib
diff --git a/joblib/test/test_func_inspect.py b/joblib/test/test_func_inspect.py index 15c6b43..62920e9 100644 --- a/joblib/test/test_func_inspect.py +++ b/joblib/test/test_func_inspect.py @@ -11,12 +11,15 @@ import shutil import nose import tempfile import functools +import sys from joblib.func_inspect import filter_args, get_func_name, get_func_code from joblib.func_inspect import _clean_win_chars, format_signature from joblib.memory import Memory from joblib.test.common import with_numpy +from joblib.testing import assert_raises_regex +PY3 = sys.version_info[0] >= 3 ############################################################################### # Module-level functions, for tests @@ -165,6 +168,37 @@ def test_func_inspect_errors(): __file__.replace('.pyc', '.py')) +if PY3: + exec(""" +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): pass + +def func_with_signature(a: int, b: int) -> None: pass +""") + + def test_filter_args_python_3(): + nose.tools.assert_equal( + filter_args(func_with_kwonly_args, + [], (1, 2), {'kw1': 3, 'kw2': 4}), + {'a': 1, 'b': 2, 'kw1': 3, 'kw2': 4}) + + # filter_args doesn't care about keyword-only arguments so you + # can pass 'kw1' into *args without any problem + assert_raises_regex( + ValueError, + "Keyword-only parameter 'kw1' was passed as positional parameter", + filter_args, + func_with_kwonly_args, [], (1, 2, 3), {'kw2': 2}) + + nose.tools.assert_equal( + filter_args(func_with_kwonly_args, ['b', 'kw2'], (1, 2), + {'kw1': 3, 'kw2': 4}), + {'a': 1, 'kw1': 3}) + + nose.tools.assert_equal( + filter_args(func_with_signature, ['b'], (1, 2)), + {'a': 1}) + + def test_bound_methods(): """ Make sure that calling the same method on two different instances of the same class does resolv to different signatures. 
diff --git a/joblib/test/test_memory.py b/joblib/test/test_memory.py index 14e319f..5f0bbfd 100644 --- a/joblib/test/test_memory.py +++ b/joblib/test/test_memory.py @@ -21,7 +21,9 @@ import nose from joblib.memory import Memory, MemorizedFunc, NotMemorizedFunc, MemorizedResult from joblib.memory import NotMemorizedResult, _FUNCTION_HASHES from joblib.test.common import with_numpy, np +from joblib.testing import assert_raises_regex +PY3 = sys.version_info[0] >= 3 ############################################################################### # Module-level variables for the tests @@ -676,3 +678,48 @@ def test_memory_in_memory_function_code_change(): def test_clear_memory_with_none_cachedir(): mem = Memory(cachedir=None) mem.clear() + +if PY3: + exec(""" +def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'): + return a, b, kw1, kw2 + +def func_with_signature(a: int, b: float) -> float: + return a + b +""") + + def test_memory_func_with_kwonly_args(): + mem = Memory(cachedir=env['dir'], verbose=0) + func_cached = mem.cache(func_with_kwonly_args) + + nose.tools.assert_equal(func_cached(1, 2, kw1=3), (1, 2, 3, 'kw2')) + + # Making sure that providing a keyword-only argument by + # position raises an exception + assert_raises_regex( + ValueError, + "Keyword-only parameter 'kw1' was passed as positional parameter", + func_cached, + 1, 2, 3, {'kw2': 4}) + + # Keyword-only parameter passed by position with cached call + # should still raise ValueError + func_cached(1, 2, kw1=3, kw2=4) + + assert_raises_regex( + ValueError, + "Keyword-only parameter 'kw1' was passed as positional parameter", + func_cached, + 1, 2, 3, {'kw2': 4}) + + # Test 'ignore' parameter + func_cached = mem.cache(func_with_kwonly_args, ignore=['kw2']) + nose.tools.assert_equal(func_cached(1, 2, kw1=3, kw2=4), (1, 2, 3, 4)) + nose.tools.assert_equal(func_cached(1, 2, kw1=3, kw2='ignored'), (1, 2, 3, 4)) + + + def test_memory_func_with_signature(): + mem = Memory(cachedir=env['dir'], verbose=0) + 
func_cached = mem.cache(func_with_signature) + + nose.tools.assert_equal(func_cached(1, 2.), 3.)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "coverage", "pytest" ], "pre_install": null, "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 coverage==6.2 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/joblib/joblib.git@c60d263fcc71ba9f4532010b732cde42e437039b#egg=joblib more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work nose==1.3.7 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: joblib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - nose==1.3.7 prefix: /opt/conda/envs/joblib
[ "joblib/test/test_func_inspect.py::test_filter_args_python_3", "joblib/test/test_memory.py::test_memory_func_with_kwonly_args", "joblib/test/test_memory.py::test_memory_func_with_signature" ]
[]
[ "joblib/test/test_func_inspect.py::test_filter_args_method", "joblib/test/test_func_inspect.py::test_filter_kwargs", "joblib/test/test_func_inspect.py::test_filter_args_2", "joblib/test/test_func_inspect.py::test_func_inspect_errors", "joblib/test/test_func_inspect.py::test_bound_methods", "joblib/test/test_func_inspect.py::test_filter_args_error_msg", "joblib/test/test_func_inspect.py::test_clean_win_chars", "joblib/test/test_func_inspect.py::test_format_signature", "joblib/test/test_func_inspect.py::test_special_source_encoding", "joblib/test/test_func_inspect.py::test_func_code_consistency", "joblib/test/test_memory.py::test_memory_warning_lambda_collisions", "joblib/test/test_memory.py::test_argument_change", "joblib/test/test_memory.py::test_call_and_shelve", "joblib/test/test_memory.py::test_memorized_pickling", "joblib/test/test_memory.py::test_memorized_repr", "joblib/test/test_memory.py::test_memory_file_modification", "joblib/test/test_memory.py::test_memory_in_memory_function_code_change", "joblib/test/test_memory.py::test_clear_memory_with_none_cachedir" ]
[]
BSD 3-Clause "New" or "Revised" License
328
coleifer__peewee-790
f424cd64352ecc799c3c2d60aafe17dbb30f58ef
2015-12-10 17:14:23
ffcdf8786f46bf612c5a3973f29bf87b3b4c74aa
diff --git a/.gitignore b/.gitignore index 6e0d4e46..b348417c 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ playhouse/_speedups.so playhouse/tests/peewee_test.db .idea/ MANIFEST +peewee_test.db diff --git a/peewee.py b/peewee.py index 827d0896..c910682d 100644 --- a/peewee.py +++ b/peewee.py @@ -1038,7 +1038,12 @@ class UUIDField(Field): db_field = 'uuid' def db_value(self, value): - return None if value is None else str(value) + if isinstance(value, uuid.UUID): + return value.hex + try: + return uuid.UUID(value).hex + except: + return value def python_value(self, value): return None if value is None else uuid.UUID(value) @@ -3751,6 +3756,7 @@ class MySQLDatabase(Database): 'float': 'FLOAT', 'primary_key': 'INTEGER AUTO_INCREMENT', 'text': 'LONGTEXT', + 'uuid': 'LONGTEXT' } for_update = True interpolation = '%s'
Better support for UUIDField As an extension from #780, databases which don't support UUID natively may encounter consistency problems. Postgres will correctly handle multiple input variants [\[1\]][1], such as `a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11` and `a0eebc999c0b4ef8bb6d6bb9bd380a11`. However SQLite, which now uses `TEXT` since #780, does not support multiple input types properly, and will end up with different variants being stored in the backend, rather than being normalised. Other databases, such as MySQL, are not supported at all as they don't have the necessary `field_overrides`. The most viable fix I can see for this would be; ```py class UUIDField(Field): ... def db_value(self, value): if isinstance(value, uuid.UUID): return value.hex try: return uuid.UUID(value).hex except: return value ``` The above would ensure that the UUID output is always consistent, whilst also allowing non confirming values to be used with `filter()`, e.g. `0f0f0f0f-` I'd also like to propose that we add a `field_override` for `MySQLDatabase`, much the same as `SqliteDatabase`. If you're happy with the above solution, I'll throw up a PR, let me know [1]: http://www.postgresql.org/docs/9.1/static/datatype-uuid.html
coleifer/peewee
diff --git a/playhouse/tests/test_fields.py b/playhouse/tests/test_fields.py index a9cfbcaf..1af7ee50 100644 --- a/playhouse/tests/test_fields.py +++ b/playhouse/tests/test_fields.py @@ -726,7 +726,6 @@ class TestServerDefaults(ModelTestCase): self.assertEqual(sd2_db.timestamp, datetime.datetime(2015, 1, 2, 3, 4)) -@skip_if(lambda: isinstance(test_db, MySQLDatabase)) class TestUUIDField(ModelTestCase): requires = [ TestingID, @@ -745,6 +744,29 @@ class TestUUIDField(ModelTestCase): t2 = TestingID.get(TestingID.uniq == uuid_obj) self.assertEqual(t1, t2) + def test_uuid_casting(self): + uuid_obj = uuid.UUID('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11') + uuid_str = uuid_obj.hex + uuid_str_short = uuid_str.replace("-", "") + + t1 = TestingID.create(uniq=uuid_obj) + t1_db = TestingID.get(TestingID.uniq == uuid_str) + self.assertEqual(t1_db.uniq, uuid_obj) + t1_db = TestingID.get(TestingID.uniq == uuid_str_short) + self.assertEqual(t1_db.uniq, uuid_obj) + + t1 = TestingID.create(uniq=uuid_str) + t1_db = TestingID.get(TestingID.uniq == uuid_str) + self.assertEqual(t1_db.uniq, uuid_obj) + t1_db = TestingID.get(TestingID.uniq == uuid_str_short) + self.assertEqual(t1_db.uniq, uuid_obj) + + t1 = TestingID.create(uniq=uuid_str_short) + t1_db = TestingID.get(TestingID.uniq == uuid_str) + self.assertEqual(t1_db.uniq, uuid_obj) + t1_db = TestingID.get(TestingID.uniq == uuid_str_short) + self.assertEqual(t1_db.uniq, uuid_obj) + def test_uuid_foreign_keys(self): data_a = UUIDData.create(id=uuid.uuid4(), data='a') data_b = UUIDData.create(id=uuid.uuid4(), data='b')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_issue_reference", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
2.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "Cython" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
Cython==3.0.12 exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 -e git+https://github.com/coleifer/peewee.git@f424cd64352ecc799c3c2d60aafe17dbb30f58ef#egg=peewee pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1
name: peewee channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cython==3.0.12 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/peewee
[ "playhouse/tests/test_fields.py::TestFieldTypes::test_null_query", "playhouse/tests/test_fields.py::TestFieldTypes::test_regexp", "playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_date", "playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_date_where", "playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_datetime", "playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_datetime_where", "playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_time", "playhouse/tests/test_fields.py::TestDateTimeExtract::test_extract_time_where", "playhouse/tests/test_fields.py::TestUniqueColumnConstraint::test_multi_index", "playhouse/tests/test_fields.py::TestUniqueColumnConstraint::test_unique", "playhouse/tests/test_fields.py::TestNonIntegerPrimaryKey::test_non_int_fk", "playhouse/tests/test_fields.py::TestNonIntegerPrimaryKey::test_non_int_pk", "playhouse/tests/test_fields.py::TestPrimaryKeyIsForeignKey::test_pk_fk_relations", "playhouse/tests/test_fields.py::TestPrimaryKeyIsForeignKey::test_primary_foreign_key", "playhouse/tests/test_fields.py::TestFieldDatabaseColumn::test_db_column", "playhouse/tests/test_fields.py::TestFieldDatabaseColumn::test_select", "playhouse/tests/test_fields.py::TestUUIDField::test_uuid_casting" ]
[ "playhouse/tests/test_fields.py::TestFieldTypes::test_between", "playhouse/tests/test_fields.py::TestFieldTypes::test_endswith", "playhouse/tests/test_fields.py::TestFieldTypes::test_in_", "playhouse/tests/test_fields.py::TestFieldTypes::test_startswith", "playhouse/tests/test_fields.py::TestFieldTypes::test_blob_field", "playhouse/tests/test_fields.py::TestFieldTypes::test_contains", "playhouse/tests/test_fields.py::TestFieldTypes::test_date_and_time_fields", "playhouse/tests/test_fields.py::TestFieldTypes::test_date_as_string", "playhouse/tests/test_fields.py::TestFieldTypes::test_field_types", "playhouse/tests/test_fields.py::TestFieldTypes::test_fixed_charfield", "playhouse/tests/test_fields.py::TestFieldTypes::test_floatfield", "playhouse/tests/test_fields.py::TestFieldTypes::test_intfield", "playhouse/tests/test_fields.py::TestFieldTypes::test_time_field_python_value", "playhouse/tests/test_fields.py::TestFieldTypes::test_various_formats", "playhouse/tests/test_fields.py::TestSQLiteDatePart::test_sqlite_date_part" ]
[ "playhouse/tests/test_fields.py::TestFieldTypes::test_concat", "playhouse/tests/test_fields.py::TestFieldTypes::test_decimalfield", "playhouse/tests/test_fields.py::TestSQLiteDateTrunc::test_sqlite_date_trunc", "playhouse/tests/test_fields.py::TestCheckConstraints::test_check_constraint", "playhouse/tests/test_fields.py::TestServerDefaults::test_server_default", "playhouse/tests/test_fields.py::TestUUIDField::test_prefetch_regression", "playhouse/tests/test_fields.py::TestUUIDField::test_uuid", "playhouse/tests/test_fields.py::TestUUIDField::test_uuid_foreign_keys" ]
[ "playhouse/tests/test_fields.py::TestFieldTypes::test_blob_field_mysql", "playhouse/tests/test_fields.py::TestFieldTypes::test_boolfield", "playhouse/tests/test_fields.py::TestFieldTypes::test_charfield" ]
MIT License
329
mozilla__puente-54
f12f27e2230f071f474b394d1ac908d2ed3476e2
2015-12-10 20:46:56
f78d702e0d1376425d8d613a6573a896fc8d11a1
diff --git a/puente/commands.py b/puente/commands.py index c21cdd3..a36d035 100644 --- a/puente/commands.py +++ b/puente/commands.py @@ -129,11 +129,10 @@ def extract_command(outputdir, domain_methods, text_domain, keywords, print('Done') -def merge_command(create, backup, base_dir, domain_methods, languages): +def merge_command(create, base_dir, domain_methods, languages): """ :arg create: whether or not to create directories if they don't exist - :arg backup: whether or not to create backup .po files :arg base_dir: BASE_DIR setting :arg domain_methods: DOMAIN_METHODS setting :arg languages: LANGUAGES setting @@ -214,7 +213,6 @@ def merge_command(create, backup, base_dir, domain_methods, languages): 'msgmerge', '--update', '--width=200', - '--backup=%s' % ('simple' if backup else 'off'), domain_po, '-' ] diff --git a/puente/management/commands/merge.py b/puente/management/commands/merge.py index acf9b26..74de7d9 100644 --- a/puente/management/commands/merge.py +++ b/puente/management/commands/merge.py @@ -32,17 +32,11 @@ class Command(BaseCommand): action='store_true', dest='create', default=False, help='Create locale subdirectories' ), - make_option( - '-b', '--backup', - action='store_true', dest='backup', default=False, - help='Create backup files of .po files' - ), ) def handle(self, *args, **options): return merge_command( create=options.get('create'), - backup=options.get('backup'), base_dir=get_setting('BASE_DIR'), domain_methods=get_setting('DOMAIN_METHODS'), languages=getattr(settings, 'LANGUAGES', []) diff --git a/puente/utils.py b/puente/utils.py index e631102..ee1cc28 100644 --- a/puente/utils.py +++ b/puente/utils.py @@ -95,8 +95,20 @@ def generate_keywords(additional_keywords=None): # Shallow copy keywords = dict(BABEL_KEYWORDS) - keywords['_lazy'] = None - # FIXME: Add other keywords from Django here + keywords.update({ + '_lazy': None, + 'gettext_lazy': None, + 'ugettext_lazy': None, + 'gettext_noop': None, + 'ugettext_noop': None, + + 
'ngettext_lazy': (1, 2), + 'ungettext_lazy': (1, 2), + + 'npgettext': ((1, 'c'), 2, 3), + 'pgettext_lazy': ((1, 'c'), 2), + 'npgettext_lazy': ((1, 'c'), 2, 3), + }) # Add specified keywords if additional_keywords:
add django gettext keywords * upgettext * upgettext_lazy * pgettext * pgettext_lazy * etc Add all the ones that we're currently missing.
mozilla/puente
diff --git a/tests/test_extract.py b/tests/test_extract.py index bc91f54..81fb63e 100644 --- a/tests/test_extract.py +++ b/tests/test_extract.py @@ -282,3 +282,63 @@ class TestExtractCommand: """) ) + + def test_django_pgettext_keywords(self, tmpdir): + # Test context + tmpdir.join('foo.py').write(dedent("""\ + pgettext("context1", "string1") + pgettext_lazy("context2", "string2") + npgettext("context3", "string3", "plural3", 5) + npgettext_lazy("context4", "string4", "plural4", 5) + """)) + + # Extract + extract_command( + outputdir=str(tmpdir), + domain_methods={ + 'django': [ + ('*.py', 'python'), + ] + }, + text_domain=puente_settings.TEXT_DOMAIN, + keywords=puente_settings.KEYWORDS, + comment_tags=puente_settings.COMMENT_TAGS, + base_dir=str(tmpdir), + project=puente_settings.PROJECT, + version=puente_settings.VERSION, + msgid_bugs_address=puente_settings.MSGID_BUGS_ADDRESS, + ) + + # Verify contents + assert os.path.exists(str(tmpdir.join('django.pot'))) + pot_file = nix_header(tmpdir.join('django.pot').read()) + assert ( + pot_file == + dedent("""\ + #: foo.py:1 + msgctxt "context1" + msgid "string1" + msgstr "" + + #: foo.py:2 + msgctxt "context2" + msgid "string2" + msgstr "" + + #: foo.py:3 + msgctxt "context3" + msgid "string3" + msgid_plural "plural3" + msgstr[0] "" + msgstr[1] "" + + #: foo.py:4 + msgctxt "context4" + msgid "string4" + msgid_plural "plural4" + msgstr[0] "" + msgstr[1] "" + + """) + ) + diff --git a/tests/test_merge.py b/tests/test_merge.py index 60b46f1..29e1456 100644 --- a/tests/test_merge.py +++ b/tests/test_merge.py @@ -65,7 +65,6 @@ class TestMergecommand: merge_command( create=True, - backup=True, base_dir=str(tmpdir), domain_methods={ 'django': [ @@ -84,7 +83,6 @@ class TestMergecommand: with pytest.raises(CommandError): merge_command( create=True, - backup=True, base_dir=str(tmpdir), domain_methods={ 'django': [
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-pythonpath", "pytest-django" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 asgiref==3.8.1 attrs==25.3.0 babel==2.17.0 backports.tarfile==1.2.0 build==1.2.2.post1 cachetools==5.5.2 certifi==2025.1.31 cffi==1.17.1 chardet==5.2.0 charset-normalizer==3.4.1 check-manifest==0.50 colorama==0.4.6 cryptography==44.0.2 distlib==0.3.9 Django==4.2.20 django-jinja==2.11.0 docutils==0.21.2 filelock==3.18.0 id==1.5.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 jaraco.classes==3.4.0 jaraco.context==6.0.1 jaraco.functools==4.1.0 jeepney==0.9.0 Jinja2==3.1.6 keyring==25.6.0 markdown-it-py==3.0.0 MarkupSafe==3.0.2 mdurl==0.1.2 more-itertools==10.6.0 nh3==0.2.21 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 -e git+https://github.com/mozilla/puente.git@f12f27e2230f071f474b394d1ac908d2ed3476e2#egg=puente py==1.11.0 pycparser==2.22 Pygments==2.19.1 pyproject-api==1.9.0 pyproject_hooks==1.2.0 pytest==6.2.5 pytest-django==4.5.2 pytest-pythonpath==0.7.4 readme_renderer==44.0 requests==2.32.3 requests-toolbelt==1.0.0 rfc3986==2.0.0 rich==14.0.0 SecretStorage==3.3.3 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 sqlparse==0.5.3 swebench_matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==2.2.1 tox==4.25.0 twine==6.1.0 typing_extensions==4.13.0 urllib3==2.3.0 virtualenv==20.29.3 zipp==3.21.0
name: puente channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - asgiref==3.8.1 - attrs==25.3.0 - babel==2.17.0 - backports-tarfile==1.2.0 - build==1.2.2.post1 - cachetools==5.5.2 - certifi==2025.1.31 - cffi==1.17.1 - chardet==5.2.0 - charset-normalizer==3.4.1 - check-manifest==0.50 - colorama==0.4.6 - cryptography==44.0.2 - distlib==0.3.9 - django==4.2.20 - django-jinja==2.11.0 - docutils==0.21.2 - filelock==3.18.0 - id==1.5.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jaraco-classes==3.4.0 - jaraco-context==6.0.1 - jaraco-functools==4.1.0 - jeepney==0.9.0 - jinja2==3.1.6 - keyring==25.6.0 - markdown-it-py==3.0.0 - markupsafe==3.0.2 - mdurl==0.1.2 - more-itertools==10.6.0 - nh3==0.2.21 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - py==1.11.0 - pycparser==2.22 - pygments==2.19.1 - pyproject-api==1.9.0 - pyproject-hooks==1.2.0 - pytest==6.2.5 - pytest-django==4.5.2 - pytest-pythonpath==0.7.4 - readme-renderer==44.0 - requests==2.32.3 - requests-toolbelt==1.0.0 - rfc3986==2.0.0 - rich==14.0.0 - secretstorage==3.3.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - sqlparse==0.5.3 - 
swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==2.2.1 - tox==4.25.0 - twine==6.1.0 - typing-extensions==4.13.0 - urllib3==2.3.0 - virtualenv==20.29.3 - zipp==3.21.0 prefix: /opt/conda/envs/puente
[ "tests/test_merge.py::TestMergecommand::test_missing_pot_file" ]
[ "tests/test_extract.py::TestManageExtract::test_help", "tests/test_merge.py::TestManageMerge::test_help", "tests/test_extract.py::TestExtractCommand::test_basic_extraction", "tests/test_extract.py::TestExtractCommand::test_header", "tests/test_extract.py::TestExtractCommand::test_whitespace_collapsing", "tests/test_extract.py::TestExtractCommand::test_context", "tests/test_extract.py::TestExtractCommand::test_plurals", "tests/test_extract.py::TestExtractCommand::test_django_pgettext_keywords", "tests/test_merge.py::TestMergecommand::test_basic" ]
[]
[]
BSD 3-Clause "New" or "Revised" License
330
cmc-python__modelmachine-12
11fd90f0eb9b713b40be7246b25ca817702af930
2015-12-11 13:53:42
fa9275d64498c7cbe24f02357bbb1bc971670756
diff --git a/.gitignore b/.gitignore index a9ce44b..ee5aa21 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -.*.swp # Vim +*.swp # Vim # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/README.md b/README.md index d01c4fe..da57d66 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # modelmachine Model machine emulator -[![Build Status](https://travis-ci.org/cmc-python/modelmachine.svg?branch=master)](https://travis-ci.org/cmc-python/modelmachine) +[![Build Status](https://travis-ci.org/vslutov/modelmachine.svg?branch=master)](https://travis-ci.org/vslutov/modelmachine) ## TODO diff --git a/modelmachine/__main__.py b/modelmachine/__main__.py index 23b5e8e..852943e 100644 --- a/modelmachine/__main__.py +++ b/modelmachine/__main__.py @@ -3,41 +3,62 @@ """Modelmachine - model machine emulator.""" from modelmachine.ide import get_program, get_cpu, debug -import pytest, os, sys +import pytest, os, sys, argparse VERSION = "0.0.6" # Don't forget fix in setup.py -USAGE = '''Usage: modelmachine command [file] -Available commands: - test : run internal tests - run <filename> : execute filename - debug <filename> : debug filename - version : print version and exit - help : print this help and exit''' +def run_program(args): + cpu = get_program(args.filename, args.protect_memory) + cpu.run_file(args.filename) +def run_debug(args): + cpu = get_program(args.filename, args.protect_memory) + debug(cpu) -def main(argv, stdin, stdout): +def run_tests(args): + path = os.path.abspath(os.path.dirname(__file__)) + sys.argv[1] = path + pytest.main() + +def main(argv, stdout): """Execute, when user call modelmachine.""" - stdin = stdin - if len(argv) == 2 and argv[1] == "test": - path = os.path.abspath(os.path.dirname(__file__)) - argv[1] = path - pytest.main() - elif len(argv) == 3 and argv[1] == "debug": - filename = argv[2] - cpu = get_program(filename) - debug(cpu) - elif len(argv) == 3 and argv[1] == "run": - filename = argv[2] - cpu = get_program(filename) 
- cpu.run_file(filename) - elif len(argv) == 2 and argv[1] == "version": + parser = argparse.ArgumentParser(description='Run modelmachine.', add_help=False) + + group = parser.add_mutually_exclusive_group() + + group.add_argument('-h', '--help', action='store_true', default=False, + help='show this help message and exit') + group.add_argument('-v', '--version', action='store_true', default=False, + help='print version and exit') + + parser.add_argument('-m', '--protect_memory', action='store_true', default=False, + help='raise an error if try to read dirty memory') + subparsers = parser.add_subparsers(title='commands', + help='commands for model machine emulator') + + run = subparsers.add_parser('run', help='run program') + run.add_argument('filename', help='file with source code') + run.set_defaults(func=run_program) + + debug = subparsers.add_parser('debug', help='run program in debug mode') + debug.add_argument('filename', help='file with source code') + debug.set_defaults(func=run_debug) + + test = subparsers.add_parser('test', help='run internal tests end exit') + test.set_defaults(func=run_tests) + + args = parser.parse_args(argv[1:]) + + if args.version: print("ModelMachine", VERSION, file=stdout) + elif args.help: + parser.print_help(stdout) else: - print(USAGE, file=stdout) - if not (len(argv) == 2 and argv[1] == "help"): - exit(1) + args.func(args) def exec_main(): """Hook for testability.""" - main(sys.argv, sys.stdin, sys.stdout) + main(sys.argv, sys.stdout) + +if __name__ == '__main__': + exec_main() diff --git a/modelmachine/cpu.py b/modelmachine/cpu.py index bb4faeb..8519e9c 100644 --- a/modelmachine/cpu.py +++ b/modelmachine/cpu.py @@ -70,13 +70,15 @@ class AbstractCPU: self.io_unit.load_source(code) - input_addresses = [int(x, 0) for x in self.config['input'].split(',')] - self.io_unit.load_data(input_addresses, data) + if 'input' in self.config: + input_addresses = [int(x, 0) for x in self.config['input'].split(',')] + 
self.io_unit.load_data(input_addresses, data) def print_result(self, output=sys.stdout): """Print calculation result.""" - for address in (int(x, 0) for x in self.config['output'].split(',')): - print(self.io_unit.get_int(address), file=output) + if 'output' in self.config: + for address in (int(x, 0) for x in self.config['output'].split(',')): + print(self.io_unit.get_int(address), file=output) def run_file(self, filename, output=sys.stdout): """Run all execution cycle.""" @@ -90,7 +92,7 @@ class BordachenkovaMM3(AbstractCPU): """Bordachenkova model machine 3.""" - def __init__(self): + def __init__(self, protect_memory): """See help(type(x)).""" word_size = 7 * 8 address_size = 2 * 8 @@ -98,7 +100,7 @@ class BordachenkovaMM3(AbstractCPU): self.ram = RandomAccessMemory(word_size=word_size, memory_size=memory_size, endianess='big', # Unused - is_protected=True) + is_protected=protect_memory) self.registers = RegisterMemory() self.register_names = BCU3.register_names self.alu = ArithmeticLogicUnit(registers=self.registers, @@ -119,7 +121,7 @@ class BordachenkovaMM2(AbstractCPU): """Bordachenkova model machine 2.""" - def __init__(self): + def __init__(self, protect_memory): """See help(type(x)).""" word_size = 5 * 8 address_size = 2 * 8 @@ -127,7 +129,7 @@ class BordachenkovaMM2(AbstractCPU): self.ram = RandomAccessMemory(word_size=word_size, memory_size=memory_size, endianess='big', # Unused - is_protected=True) + is_protected=protect_memory) self.registers = RegisterMemory() self.register_names = BCU2.register_names self.alu = ArithmeticLogicUnit(registers=self.registers, @@ -148,7 +150,7 @@ class BordachenkovaMMV(AbstractCPU): """Bordachenkova variable model machine.""" - def __init__(self): + def __init__(self, protect_memory): """See help(type(x)).""" byte_size = 8 word_size = 5 * byte_size @@ -157,7 +159,7 @@ class BordachenkovaMMV(AbstractCPU): self.ram = RandomAccessMemory(word_size=byte_size, memory_size=memory_size, endianess='big', - is_protected=True) + 
is_protected=protect_memory) self.registers = RegisterMemory() self.register_names = BCUV.register_names self.alu = ArithmeticLogicUnit(registers=self.registers, @@ -178,7 +180,7 @@ class BordachenkovaMM1(AbstractCPU): """Bordachenkova model machine 1.""" - def __init__(self): + def __init__(self, protect_memory): """See help(type(x)).""" word_size = 3 * 8 address_size = 2 * 8 @@ -186,7 +188,7 @@ class BordachenkovaMM1(AbstractCPU): self.ram = RandomAccessMemory(word_size=word_size, memory_size=memory_size, endianess='big', # Unused - is_protected=True) + is_protected=protect_memory) self.registers = RegisterMemory() self.register_names = BCU1.register_names self.alu = ArithmeticLogicUnit(registers=self.registers, @@ -208,7 +210,7 @@ class BordachenkovaMMS(AbstractCPU): """Bordachenkova stack model machine.""" - def __init__(self): + def __init__(self, protect_memory): """See help(type(x)).""" byte_size = 8 word_size = 3 * byte_size @@ -217,7 +219,7 @@ class BordachenkovaMMS(AbstractCPU): self.ram = RandomAccessMemory(word_size=byte_size, memory_size=memory_size, endianess='big', # Unused - is_protected=True) + is_protected=protect_memory) self.registers = RegisterMemory() self.register_names = BCUS.register_names self.alu = ArithmeticLogicUnit(registers=self.registers, diff --git a/modelmachine/ide.py b/modelmachine/ide.py index 954cab3..a5d154c 100644 --- a/modelmachine/ide.py +++ b/modelmachine/ide.py @@ -147,20 +147,20 @@ def debug(cpu): else: need_help = True -def get_cpu(source): +def get_cpu(source, protect_memory): """Return empty cpu or raise the ValueError.""" arch = source[0].strip() if arch in CPU_LIST: - cpu = CPU_LIST[arch]() + cpu = CPU_LIST[arch](protect_memory) return cpu else: raise ValueError('Unexpected arch (found in first line): {arch}' .format(arch=arch)) -def get_program(filename): +def get_program(filename, protect_memory): """Read model machine program.""" with open(filename, 'r') as source_file: source = source_file.readlines() - cpu = 
get_cpu(source) + cpu = get_cpu(source, protect_memory) cpu.load_program(source) return cpu diff --git a/samples/minimal.mmach b/samples/minimal.mmach new file mode 100644 index 0000000..40b875b --- /dev/null +++ b/samples/minimal.mmach @@ -0,0 +1,9 @@ +bordachenkova_mms + +[config] + +[code] +99 ; halt + +[input] +
Поправить RandomAcessMemory.is_protected - Проверить, что при is_protected=False машина выдает нули и не падает. - Поправить машину с переменной длиной адреса, устранить лишние считывания. - Сделать параметр командной строки для управления этим параметром.
cmc-python/modelmachine
diff --git a/modelmachine/tests/test_cpu.py b/modelmachine/tests/test_cpu.py index 08a5e25..5715ea8 100644 --- a/modelmachine/tests/test_cpu.py +++ b/modelmachine/tests/test_cpu.py @@ -109,7 +109,7 @@ class TestBordachenkovaMM3: def setup(self): """Init state.""" - self.cpu = BordachenkovaMM3() + self.cpu = BordachenkovaMM3(protect_memory=False) self.source = ("[config]\ninput=0x101,0x102\noutput=0x103\n" + "[code]\n01 0101 0102 0103\n80 0000 0000 0003\n" + "02 0103 0103 0103; never be used\n" + @@ -137,7 +137,7 @@ class TestBordachenkovaMM2: def setup(self): """Init state.""" - self.cpu = BordachenkovaMM2() + self.cpu = BordachenkovaMM2(protect_memory=False) self.source = ("[config]\n" + "input=0x101,0x102\n" + "output=0x103\n" + @@ -172,7 +172,7 @@ class TestBordachenkovaMMV: def setup(self): """Init state.""" - self.cpu = BordachenkovaMMV() + self.cpu = BordachenkovaMMV(protect_memory=False) self.source = ("[config]\n" + "input=0x100,0x105\n" + "output=0x10a\n" + @@ -208,7 +208,7 @@ class TestBordachenkovaMM1: def setup(self): """Init state.""" - self.cpu = BordachenkovaMM1() + self.cpu = BordachenkovaMM1(protect_memory=False) self.source = ("[config]\n" + "input=0x101,0x102\n" + "output=0x103\n" + @@ -246,7 +246,7 @@ class TestBordachenkovaMMS: def setup(self): """Init state.""" - self.cpu = BordachenkovaMMS() + self.cpu = BordachenkovaMMS(protect_memory=False) self.source = ("[config]\n" + "input=0x100,0x103\n" + "output=0x106\n" + diff --git a/modelmachine/tests/test_cu_bord_variable.py b/modelmachine/tests/test_cu_bord_variable.py index d7e83da..135823a 100644 --- a/modelmachine/tests/test_cu_bord_variable.py +++ b/modelmachine/tests/test_cu_bord_variable.py @@ -26,7 +26,7 @@ class TestBordachenkovaControlUnitV(TBCU2): def setup(self): """Init state.""" super().setup() - self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big') + self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big', is_protected=True) self.control_unit = BordachenkovaControlUnitV(WORD_SIZE, 
BYTE_SIZE, self.registers, @@ -158,6 +158,23 @@ class TestBordachenkovaControlUnitV(TBCU2): assert self.registers.fetch("IP", BYTE_SIZE) == 0x15 assert self.control_unit.get_status() == HALTED + def test_minimal_run(self): + """Minimal program.""" + self.control_unit.registers = self.registers = RegisterMemory() + self.registers.add_register('IR', WORD_SIZE) + self.alu = ArithmeticLogicUnit(self.registers, + self.control_unit.register_names, + WORD_SIZE, + BYTE_SIZE) + self.control_unit.alu = self.alu + + self.ram.put(0x00, 0x99, BYTE_SIZE) + self.registers.put("IP", 0, BYTE_SIZE) + + self.control_unit.run() + assert self.registers.fetch("IP", BYTE_SIZE) == 0x01 + assert self.control_unit.get_status() == HALTED + class TestBordachenkovaControlUnitS(TBCU2): @@ -166,7 +183,7 @@ class TestBordachenkovaControlUnitS(TBCU2): def setup(self): """Init state.""" super().setup() - self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big') + self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big', is_protected=True) self.control_unit = BordachenkovaControlUnitS(WORD_SIZE, BYTE_SIZE, self.registers, @@ -457,3 +474,23 @@ class TestBordachenkovaControlUnitS(TBCU2): assert self.registers.fetch("IP", BYTE_SIZE) == 0x1a assert self.registers.fetch("SP", BYTE_SIZE) == 0 assert self.control_unit.get_status() == HALTED + + def test_minimal_run(self): + """Very simple program.""" + self.control_unit.registers = self.registers = RegisterMemory() + self.registers.add_register("IR", WORD_SIZE) + self.registers.add_register("SP", BYTE_SIZE) + self.registers.put("SP", 0, BYTE_SIZE) + self.alu = ArithmeticLogicUnit(self.registers, + self.control_unit.register_names, + WORD_SIZE, + BYTE_SIZE) + self.control_unit.alu = self.alu + + self.ram.put(0x00, 0x99, BYTE_SIZE) + self.registers.put("IP", 0, BYTE_SIZE) + + self.control_unit.run() + assert self.registers.fetch("IP", BYTE_SIZE) == 0x01 + assert self.registers.fetch("SP", BYTE_SIZE) == 0 + assert self.control_unit.get_status() == HALTED diff --git 
a/modelmachine/tests/test_ide.py b/modelmachine/tests/test_ide.py index b58861e..18c9635 100644 --- a/modelmachine/tests/test_ide.py +++ b/modelmachine/tests/test_ide.py @@ -10,15 +10,15 @@ from pytest import raises def test_get_cpu(): """Test define cpu method.""" - ide.CPU_LIST = {"mm1": create_autospec(AbstractCPU, True)} + ide.CPU_LIST = {"abstract_cpu_test": create_autospec(AbstractCPU, True)} with raises(ValueError): - ide.get_cpu(["not_found_cpu", "[config]", "[code]", "00 00", "[input]"]) + ide.get_cpu(["not_found_cpu", "[config]", "[code]", "00 00", "[input]"], False) with raises(ValueError): - ide.get_cpu(["[config]", "[code]", "00 00", "[input]"]) + ide.get_cpu(["[config]", "[code]", "00 00", "[input]"], False) - cpu = ide.get_cpu(["mm1", "[config]", "key=value", - "[code]", "00 00", "99 00", "[input]"]) + cpu = ide.get_cpu(["abstract_cpu_test", "[config]", "key=value", + "[code]", "00 00", "99 00", "[input]"], False) assert isinstance(cpu, AbstractCPU) diff --git a/modelmachine/tests/test_main.py b/modelmachine/tests/test_main.py index 582eebe..0b89a0e 100644 --- a/modelmachine/tests/test_main.py +++ b/modelmachine/tests/test_main.py @@ -2,7 +2,7 @@ """Test case for cli part of modelmachine package.""" -from modelmachine.__main__ import main, VERSION, USAGE +from modelmachine.__main__ import main, VERSION from pytest import raises def test_version(tmpdir): @@ -10,19 +10,25 @@ def test_version(tmpdir): output_path = tmpdir.join('output.txt') with open(str(output_path), 'w') as stdout: - main(['modelmachine', 'version'], None, stdout) + main(['modelmachine', '--version'], stdout) + + assert output_path.read() == 'ModelMachine ' + VERSION + '\n' + + with open(str(output_path), 'w') as stdout: + main(['modelmachine', '-v'], stdout) assert output_path.read() == 'ModelMachine ' + VERSION + '\n' def test_usage(tmpdir): - """Test that it's print usage (with exit code 1).""" + """Test that it's print usage.""" output_path = tmpdir.join('output.txt') with 
open(str(output_path), 'w') as stdout: - main(['modelmachine', 'help'], None, stdout) - assert output_path.read() == USAGE + '\n' - - with open(str(output_path), 'w') as stdout: - with raises(SystemExit): - main(['modelmachine', 'wrong_command'], None, stdout) - assert output_path.read() == USAGE + '\n' + main(['modelmachine', '--help'], stdout) + assert output_path.read().startswith('usage:') + + # TODO: Add stderr capture + # with open(str(output_path), 'w') as stdout: + # with raises(SystemExit): + # main(['modelmachine', 'wrong_command'], stdout) + # assert output_path.read().startswith('usage:')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 5 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pylint", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
astroid==3.3.9 dill==0.3.9 exceptiongroup==1.2.2 iniconfig==2.1.0 isort==6.0.1 mccabe==0.7.0 -e git+https://github.com/cmc-python/modelmachine.git@11fd90f0eb9b713b40be7246b25ca817702af930#egg=modelmachine packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pylint==3.3.6 pytest==8.3.5 tomli==2.2.1 tomlkit==0.13.2 typing_extensions==4.13.0
name: modelmachine channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - astroid==3.3.9 - dill==0.3.9 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - isort==6.0.1 - mccabe==0.7.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pylint==3.3.6 - pytest==8.3.5 - tomli==2.2.1 - tomlkit==0.13.2 - typing-extensions==4.13.0 prefix: /opt/conda/envs/modelmachine
[ "modelmachine/tests/test_ide.py::test_get_cpu", "modelmachine/tests/test_main.py::test_version", "modelmachine/tests/test_main.py::test_usage" ]
[ "modelmachine/tests/test_cpu.py::TestAbstractCPU::test_load_program", "modelmachine/tests/test_cpu.py::TestAbstractCPU::test_print_result", "modelmachine/tests/test_cpu.py::TestAbstractCPU::test_run_file", "modelmachine/tests/test_cpu.py::TestBordachenkovaMM3::test_smoke", "modelmachine/tests/test_cpu.py::TestBordachenkovaMM2::test_smoke", "modelmachine/tests/test_cpu.py::TestBordachenkovaMMV::test_smoke", "modelmachine/tests/test_cpu.py::TestBordachenkovaMM1::test_smoke", "modelmachine/tests/test_cpu.py::TestBordachenkovaMMS::test_smoke", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_const", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_fetch_instruction", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_basic_execute", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_execute_cond_jumps", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_jump_halt", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_execute_jump_halt", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_execute_comp", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_write_back", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_fetch_and_decode", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_load", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_step", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_run", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitV::test_minimal_run", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_const", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_fetch_instruction", 
"modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_cond_jumps", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_jump_halt", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_jump_halt", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_comp", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_fetch_and_decode", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_push", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_pop", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_load", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_basic_execute", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_execute_stack", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_write_back", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_step", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_run", "modelmachine/tests/test_cu_bord_variable.py::TestBordachenkovaControlUnitS::test_minimal_run" ]
[]
[]
Do What The F*ck You Want To Public License
331
Shopify__shopify_python_api-129
28c00a110c23edc5287d6e8f90f0e36f0eb5d1b3
2015-12-11 18:31:45
c29e0ecbed9de67dd923f980a3ac053922dab75e
diff --git a/shopify/resources/__init__.py b/shopify/resources/__init__.py index 2de7499..adacc08 100644 --- a/shopify/resources/__init__.py +++ b/shopify/resources/__init__.py @@ -46,5 +46,6 @@ from .policy import Policy from .smart_collection import SmartCollection from .gift_card import GiftCard from .discount import Discount +from .shipping_zone import ShippingZone from ..base import ShopifyResource diff --git a/shopify/resources/shipping_zone.py b/shopify/resources/shipping_zone.py new file mode 100644 index 0000000..49cd647 --- /dev/null +++ b/shopify/resources/shipping_zone.py @@ -0,0 +1,5 @@ +from ..base import ShopifyResource + + +class ShippingZone(ShopifyResource): + pass
Add support for the new Shipping Zone resource. As per https://ecommerce.shopify.com/c/api-announcements/t/shipping-zones-api-and-changes-to-the-countries-api-307687.
Shopify/shopify_python_api
diff --git a/test/fixtures/shipping_zones.json b/test/fixtures/shipping_zones.json new file mode 100644 index 0000000..f07b8ff --- /dev/null +++ b/test/fixtures/shipping_zones.json @@ -0,0 +1,114 @@ +{ + "shipping_zones": [ + { + "id": 1, + "name": "Some zone", + "countries": [ + { + "id": 817138619, + "name": "United States", + "tax": 0.0, + "code": "US", + "tax_name": "Federal Tax", + "provinces": [ + { + "id": 1013111685, + "country_id": 817138619, + "name": "New York", + "code": "NY", + "tax": 0.04, + "tax_name": "Tax", + "tax_type": null, + "shipping_zone_id": 1, + "tax_percentage": 4.0 + }, + { + "id": 1069646654, + "country_id": 817138619, + "name": "Ohio", + "code": "OH", + "tax": 0.0, + "tax_name": "State Tax", + "tax_type": null, + "shipping_zone_id": 1, + "tax_percentage": 0.0 + } + ] + }, + { + "id": 879921427, + "name": "Canada", + "tax": 0.05, + "code": "CA", + "tax_name": "GST", + "provinces": [ + { + "id": 702530425, + "country_id": 879921427, + "name": "Ontario", + "code": "ON", + "tax": 0.08, + "tax_name": "Tax", + "tax_type": null, + "shipping_zone_id": 1, + "tax_percentage": 8.0 + }, + { + "id": 224293623, + "country_id": 879921427, + "name": "Quebec", + "code": "QC", + "tax": 0.09, + "tax_name": "HST", + "tax_type": "compounded", + "shipping_zone_id": 1, + "tax_percentage": 9.0 + } + ] + }, + { + "id": 988409122, + "name": "Yemen", + "tax": 0.0, + "code": "YE", + "tax_name": "GST", + "provinces": [ + ] + } + ], + "weight_based_shipping_rates": [ + { + "id": 760465697, + "weight_low": 1.2, + "weight_high": 10.0, + "name": "Austria Express Heavy Shipping", + "price": "40.00", + "shipping_zone_id": 1 + } + ], + "price_based_shipping_rates": [ + { + "id": 583276424, + "name": "Standard Shipping", + "min_order_subtotal": "0.00", + "price": "10.99", + "max_order_subtotal": "2000.00", + "shipping_zone_id": 1 + } + ], + "carrier_shipping_rate_providers": [ + { + "id": 972083812, + "country_id": null, + "carrier_service_id": 61629186, + "flat_modifier": 
"0.00", + "percent_modifier": 0, + "service_filter": { + "*": "+" + }, + "shipping_zone_id": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/test/shipping_zone_test.py b/test/shipping_zone_test.py new file mode 100644 index 0000000..e81cfe6 --- /dev/null +++ b/test/shipping_zone_test.py @@ -0,0 +1,11 @@ +import shopify +from test.test_helper import TestCase + +class ShippingZoneTest(TestCase): + def test_get_shipping_zones(self): + self.fake("shipping_zones", method='GET', body=self.load_fixture('shipping_zones')) + shipping_zones = shopify.ShippingZone.find() + self.assertEqual(1,len(shipping_zones)) + self.assertEqual(shipping_zones[0].name,"Some zone") + self.assertEqual(3,len(shipping_zones[0].countries)) +
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pyactiveresource==2.2.2 pytest==8.3.5 PyYAML==6.0.2 -e git+https://github.com/Shopify/shopify_python_api.git@28c00a110c23edc5287d6e8f90f0e36f0eb5d1b3#egg=ShopifyAPI six==1.17.0 tomli==2.2.1
name: shopify_python_api channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pyactiveresource==2.2.2 - pytest==8.3.5 - pyyaml==6.0.2 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/shopify_python_api
[ "test/shipping_zone_test.py::ShippingZoneTest::test_get_shipping_zones" ]
[]
[]
[]
MIT License
332
ifosch__accloudtant-26
66343620af6679769c3f5a7c727ae5d3492dc5ac
2015-12-11 18:48:30
66343620af6679769c3f5a7c727ae5d3492dc5ac
diff --git a/accloudtant/aws/prices/__init__.py b/accloudtant/aws/prices/__init__.py index c038349..3b0dec6 100644 --- a/accloudtant/aws/prices/__init__.py +++ b/accloudtant/aws/prices/__init__.py @@ -12,8 +12,11 @@ from accloudtant.utils import fix_lazy_json class Prices(object): def __init__(self): - self.prices = process_ec2() - self.output = print_prices(self.prices) + with warnings.catch_warnings(record=True) as w: + self.prices = process_ec2() + self.output = print_prices(self.prices) + for warning in w: + self.output += "\n{}".format(warning.message) def __repr__(self): return self.output diff --git a/bin/accloudtant b/bin/accloudtant index 71ef806..ab13966 100755 --- a/bin/accloudtant +++ b/bin/accloudtant @@ -19,7 +19,4 @@ def report(): raise NotImplementedError if __name__ == '__main__': - with warnings.catch_warnings(record=True) as w: - cli() - for warning in w: - print(warning.message) + cli()
Warnings on non supported pricelists vanished When printing prices, there were a nice list of warnings about problems getting the data. Since the addition of the two commands, this list vanished.
ifosch/accloudtant
diff --git a/tests/aws/print_expected_with_warnings.txt b/tests/aws/print_expected_with_warnings.txt new file mode 100644 index 0000000..4af3e90 --- /dev/null +++ b/tests/aws/print_expected_with_warnings.txt @@ -0,0 +1,6 @@ +EC2 (Hourly prices, no upfronts, no instance type features): +Type On Demand 1y No Upfront 1y Partial Upfront 1y All Upfront 3y Partial Upfront 3y All Upfront +---------- ----------- --------------- -------------------- ---------------- -------------------- ---------------- +c3.8xlarge 0.768 0.611 0.5121 0.5225 0.4143 0.3894 +g2.2xlarge 0.767 0.611 0.5121 0.5225 0.4143 0.3894 +WARN: Parser not implemented for Unknown diff --git a/tests/aws/test_prices.py b/tests/aws/test_prices.py index 1edd66d..f7f8b0f 100644 --- a/tests/aws/test_prices.py +++ b/tests/aws/test_prices.py @@ -1,3 +1,4 @@ +import warnings import pytest import accloudtant.aws.prices @@ -29,12 +30,18 @@ def mock_requests_get(): @pytest.fixture def mock_process_ec2(): class MockProcessEC2(object): - def set_responses(self, responses=None): + def set_responses(self, responses=None, unknown=None): if responses is None: responses = {} + if unknown is None: + unknown = [] self.responses = responses + self.unknown = unknown def __call__(self): + for name in self.unknown: + warnings.warn("WARN: Parser not implemented for {}" + .format(name)) return self.responses def __init__(self, responses=None): @@ -1383,3 +1390,296 @@ def test_prices(capsys, monkeypatch, mock_process_ec2): assert("{}\n".format(prices.output) == expected) assert(out == expected) assert(out2 == expected) + + +def test_prices_with_warning(capsys, monkeypatch, mock_process_ec2): + result = { + 'eip': { + 'eu-ireland': { + 'perRemapOver100': '0.10', + 'perRemapFirst100': '0.00', + 'oneEIP': '0.00', + 'perNonAttachedPerHour': '0.005', + 'perAdditionalEIPPerHour': '0.005', + }, + 'us-east': { + 'perRemapOver100': '0.10', + 'perRemapFirst100': '0.00', + 'oneEIP': '0.00', + 'perNonAttachedPerHour': '0.005', + 
'perAdditionalEIPPerHour': '0.005', + }, + }, + 'cw': { + 'us-east-1': { + 'ec2Monitoring': '3.50', + 'cwRequests': '0.01', + 'cloudWatchLogs': '0.67', + 'cwMetrics': '0.50', + 'cwAlarms': '0.10', + }, + 'eu-west-1': { + 'ec2Monitoring': '4.55', + 'cwRequests': '0.013', + 'cwMetrics': '0.65', + 'cwAlarms': '0.0515', + }, + }, + 'ebs': { + 'eu-ireland': { + 'ebsSnapsToS3': '0.138', + 'Amazon EBS General Purpose (SSD) volumes': '0.095', + 'Amazon EBS Provisioned IOPS (SSD) volumes': '0.055', + 'Amazon EBS Magnetic volumes': '0.11', + }, + 'us-east': { + 'ebsSnapsToS3': '0.125', + 'Amazon EBS General Purpose (SSD) volumes': '0.095', + 'Amazon EBS Provisioned IOPS (SSD) volumes': '0.05', + 'Amazon EBS Magnetic volumes': '0.10', + }, + }, + 'data_transfer': { + 'eu-west-1': { + 'regional': {'prices': {'USD': '0.01', }, }, + 'ELB': {'prices': {'USD': '0.01', }, }, + 'AZ': {'prices': {'USD': '0.00', }, }, + 'dataXferInEC2': { + 'anotherRegion': '0.00', + 'sameAZprivateIP': '0.00', + 'anotherService': '0.00', + 'Internet': '0.00', + 'crossAZ': '0.01', + 'sameAZpublicIP': '0.01', + }, + 'dataXferOutEC2': { + 'Amazon CloudFront': '0.00', + 'crossRegion': '0.02', + 'crossAZOut': '0.01', + 'anotherServiceOut': '0.00', + 'sameAZpublicIPOut': '0.01', + 'sameAZprivateIPOut': '0.00', + }, + 'dataXferOutInternet': { + 'next4PBout': 'contactus', + 'next40TBout': '0.085', + 'next100TBout': '0.070', + 'next350TBout': '0.050', + 'next05PBout': 'contactus', + 'greater5PBout': 'contactus', + 'firstGBout': '0.000', + 'upTo10TBout': '0.090', + }, + }, + 'us-east-1': { + 'regional': {'prices': {'USD': '0.01', }, }, + 'ELB': {'prices': {'USD': '0.01', }, }, + 'AZ': {'prices': {'USD': '0.00', }, }, + 'dataXferInEC2': { + 'anotherRegion': '0.00', + 'sameAZprivateIP': '0.00', + 'anotherService': '0.00', + 'Internet': '0.00', + 'crossAZ': '0.01', + 'sameAZpublicIP': '0.01', + }, + 'dataXferOutEC2': { + 'Amazon CloudFront': '0.00', + 'crossRegion': '0.02', + 'crossAZOut': '0.01', + 
'anotherServiceOut': '0.00', + 'sameAZpublicIPOut': '0.01', + 'sameAZprivateIPOut': '0.00', + }, + 'dataXferOutInternet': { + 'next4PBout': 'contactus', + 'next40TBout': '0.085', + 'next100TBout': '0.070', + 'next350TBout': '0.050', + 'next05PBout': 'contactus', + 'greater5PBout': 'contactus', + 'firstGBout': '0.000', + 'upTo10TBout': '0.090', + }, + }, + }, + 'elb': { + 'eu-ireland': { + 'perELBHour': '0.0008', + 'perGBProcessed': '0.028', + }, + 'us-east': { + 'perELBHour': '0.0008', + 'perGBProcessed': '0.025', + }, + }, + 'linux': { + 'us-east-1': { + 'g2.2xlarge': { + 'storageGB': '60 SSD', + 'ri': { + 'yrTerm1': { + 'noUpfront': { + 'upfront': '0', + 'monthlyStar': '446.03', + 'effectiveHourly': '0.611', + }, + 'allUpfront': { + 'upfront': '2974', + 'monthlyStar': '133.59', + 'effectiveHourly': '0.5225', + }, + 'partialUpfront': { + 'upfront': '4486', + 'monthlyStar': '0', + 'effectiveHourly': '0.5121', + }, + }, + 'yrTerm3': { + 'allUpfront': { + 'upfront': '10234', + 'monthlyStar': '0', + 'effectiveHourly': '0.3894', + }, + 'partialUpfront': { + 'upfront': '7077', + 'monthlyStar': '105.85', + 'effectiveHourly': '0.4143', + }, + }, + }, + 'od': '0.767', + 'memoryGiB': '15', + 'vCPU': '8', + }, + 'c3.8xlarge': { + 'storageGB': '60 SSD', + 'ri': { + 'yrTerm1': { + 'noUpfront': { + 'upfront': '0', + 'monthlyStar': '446.03', + 'effectiveHourly': '0.611', + }, + 'allUpfront': { + 'upfront': '2974', + 'monthlyStar': '133.59', + 'effectiveHourly': '0.5225', + }, + 'partialUpfront': { + 'upfront': '4486', + 'monthlyStar': '0', + 'effectiveHourly': '0.5121', + }, + }, + 'yrTerm3': { + 'allUpfront': { + 'upfront': '10234', + 'monthlyStar': '0', + 'effectiveHourly': '0.3894', + }, + 'partialUpfront': { + 'upfront': '7077', + 'monthlyStar': '105.85', + 'effectiveHourly': '0.4143', + }, + }, + }, + 'od': '0.768', + 'memoryGiB': '15', + 'vCPU': '8', + }, + }, + 'eu-west-1': { + 'g2.2xlarge': { + 'storageGB': '60 SSD', + 'ri': { + 'yrTerm1': { + 'noUpfront': { + 'upfront': 
'0', + 'monthlyStar': '446.03', + 'effectiveHourly': '0.611', + }, + 'allUpfront': { + 'upfront': '2974', + 'monthlyStar': '133.59', + 'effectiveHourly': '0.5225', + }, + 'partialUpfront': { + 'upfront': '4486', + 'monthlyStar': '0', + 'effectiveHourly': '0.5121', + }, + }, + 'yrTerm3': { + 'allUpfront': { + 'upfront': '10234', + 'monthlyStar': '0', + 'effectiveHourly': '0.3894', + }, + 'partialUpfront': { + 'upfront': '7077', + 'monthlyStar': '105.85', + 'effectiveHourly': '0.4143', + }, + }, + }, + 'od': '0.787', + 'memoryGiB': '15', + 'vCPU': '8', + }, + 'c3.8xlarge': { + 'storageGB': '60 SSD', + 'ri': { + 'yrTerm1': { + 'noUpfront': { + 'upfront': '0', + 'monthlyStar': '446.03', + 'effectiveHourly': '0.611', + }, + 'allUpfront': { + 'upfront': '2974', + 'monthlyStar': '133.59', + 'effectiveHourly': '0.5225', + }, + 'partialUpfront': { + 'upfront': '4486', + 'monthlyStar': '0', + 'effectiveHourly': '0.5121', + }, + }, + 'yrTerm3': { + 'allUpfront': { + 'upfront': '10234', + 'monthlyStar': '0', + 'effectiveHourly': '0.3894', + }, + 'partialUpfront': { + 'upfront': '7077', + 'monthlyStar': '105.85', + 'effectiveHourly': '0.4143', + }, + }, + }, + 'od': '0.767', + 'memoryGiB': '15', + 'vCPU': '8', + }, + }, + }, + } + expected = open('tests/aws/print_expected_with_warnings.txt', 'r').read() + + monkeypatch.setattr( + 'accloudtant.aws.prices.process_ec2', + mock_process_ec2 + ) + mock_process_ec2.set_responses(result, ['Unknown']) + + prices = accloudtant.aws.prices.Prices() + print(prices) + out, err = capsys.readouterr() + + assert(prices.prices == result) + assert("{}\n".format(prices.output) == expected) + assert(out == expected)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/ifosch/accloudtant.git@66343620af6679769c3f5a7c727ae5d3492dc5ac#egg=accloudtant certifi==2025.1.31 charset-normalizer==3.4.1 exceptiongroup==1.2.2 idna==3.10 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 requests==2.32.3 tabulate==0.9.0 tomli==2.2.1 urllib3==2.3.0
name: accloudtant channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - requests==2.32.3 - tabulate==0.9.0 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/accloudtant
[ "tests/aws/test_prices.py::test_prices_with_warning" ]
[]
[ "tests/aws/test_prices.py::test_model_ec2", "tests/aws/test_prices.py::test_process_model", "tests/aws/test_prices.py::test_process_generic", "tests/aws/test_prices.py::test_process_on_demand", "tests/aws/test_prices.py::test_process_reserved", "tests/aws/test_prices.py::test_process_data_transfer", "tests/aws/test_prices.py::test_process_ebs", "tests/aws/test_prices.py::test_process_eip", "tests/aws/test_prices.py::test_process_cw", "tests/aws/test_prices.py::test_process_elb", "tests/aws/test_prices.py::test_print_prices", "tests/aws/test_prices.py::test_prices" ]
[]
null
333
sympy__sympy-10249
a9c0fd064c14c355247d6568ee5750ab560c36d4
2015-12-12 15:48:34
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/doc/src/tutorial/printing.rst b/doc/src/tutorial/printing.rst index ed08ded3a2..a606146357 100644 --- a/doc/src/tutorial/printing.rst +++ b/doc/src/tutorial/printing.rst @@ -14,7 +14,7 @@ Printers There are several printers available in SymPy. The most common ones are - str -- srepr +- repr - ASCII pretty printer - Unicode pretty printer - LaTeX @@ -119,17 +119,17 @@ exactly the same as the expression as you would enter it. >>> print(Integral(sqrt(1/x), x)) Integral(sqrt(1/x), x) -srepr ------ +repr +---- -The srepr form of an expression is designed to show the exact form of an +The repr form of an expression is designed to show the exact form of an expression. It will be discussed more in the :ref:`tutorial-manipulation` section. To get it, use ``srepr()`` [#srepr-fn]_. >>> srepr(Integral(sqrt(1/x), x)) "Integral(Pow(Pow(Symbol('x'), Integer(-1)), Rational(1, 2)), Tuple(Symbol('x')))" -The srepr form is mostly useful for understanding how an expression is built +The repr form is mostly useful for understanding how an expression is built internally. diff --git a/sympy/categories/baseclasses.py b/sympy/categories/baseclasses.py index 9b3d68937a..79b12d08da 100644 --- a/sympy/categories/baseclasses.py +++ b/sympy/categories/baseclasses.py @@ -1,6 +1,6 @@ from __future__ import print_function, division -from sympy.core import S, Basic, Dict, Symbol, Tuple +from sympy.core import S, Basic, Dict, Symbol, Tuple, sympify from sympy.core.compatibility import range, iterable from sympy.sets import Set, FiniteSet, EmptySet @@ -722,8 +722,8 @@ def __new__(cls, *args): for morphism in conclusions_arg: # Check that no new objects appear in conclusions. - if ((objects.contains(morphism.domain) == S.true) and - (objects.contains(morphism.codomain) == S.true)): + if ((sympify(objects.contains(morphism.domain)) is S.true) and + (sympify(objects.contains(morphism.codomain)) is S.true)): # No need to add identities and recurse # composites this time. 
Diagram._add_morphism_closure( @@ -920,14 +920,14 @@ def subdiagram_from_objects(self, objects): new_premises = {} for morphism, props in self.premises.items(): - if ((objects.contains(morphism.domain) == S.true) and - (objects.contains(morphism.codomain) == S.true)): + if ((sympify(objects.contains(morphism.domain)) is S.true) and + (sympify(objects.contains(morphism.codomain)) is S.true)): new_premises[morphism] = props new_conclusions = {} for morphism, props in self.conclusions.items(): - if ((objects.contains(morphism.domain) == S.true) and - (objects.contains(morphism.codomain) == S.true)): + if ((sympify(objects.contains(morphism.domain)) is S.true) and + (sympify(objects.contains(morphism.codomain)) is S.true)): new_conclusions[morphism] = props return Diagram(new_premises, new_conclusions) diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index 3b0ba18a62..ac7a486dba 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -511,8 +511,8 @@ def __sub__(self, other): return Complement(self, other) def __contains__(self, other): - symb = self.contains(other) - if symb not in (True, False): + symb = sympify(self.contains(other)) + if not (symb is S.true or symb is S.false): raise TypeError('contains did not evaluate to a bool: %r' % symb) return bool(symb) @@ -949,8 +949,8 @@ def _union(self, other): return Interval(start, end, left_open, right_open) # If I have open end points and these endpoints are contained in other - if ((self.left_open and other.contains(self.start) == True) or - (self.right_open and other.contains(self.end) == True)): + if ((self.left_open and sympify(other.contains(self.start)) is S.true) or + (self.right_open and sympify(other.contains(self.end)) is S.true)): # Fill in my end points and return open_left = self.left_open and self.start not in other open_right = self.right_open and self.end not in other @@ -1427,13 +1427,23 @@ def _contains(self, other): return And(*[set.contains(other) for set in self.args]) def __iter__(self): + 
no_iter = True for s in self.args: if s.is_iterable: + no_iter = False other_sets = set(self.args) - set((s,)) other = Intersection(other_sets, evaluate=False) - return (x for x in s if x in other) + for x in s: + c = sympify(other.contains(x)) + if c is S.true: + yield x + elif c is S.false: + pass + else: + yield c - raise ValueError("None of the constituent sets are iterable") + if no_iter: + raise ValueError("None of the constituent sets are iterable") @staticmethod def _handle_finite_sets(args): @@ -1485,7 +1495,7 @@ def _handle_finite_sets(args): # contained in `v` then remove them from `v` # and add this as a new arg contained = [x for x in symbolic_s_list - if v.contains(x) == True] + if sympify(v.contains(x)) is S.true] if contained != symbolic_s_list: new_args.append( v - FiniteSet( @@ -1837,12 +1847,20 @@ def _complement(self, other): return None elif isinstance(other, FiniteSet): - unk = FiniteSet(*[el for el in self if other.contains(el) - not in (True, False)]) + unk = [] + for i in self: + c = sympify(other.contains(i)) + if c is not S.true and c is not S.false: + unk.append(i) + unk = FiniteSet(*unk) if unk == self: return - return Complement(FiniteSet(*[el for el in other if - self.contains(el) != True]), unk) + not_true = [] + for i in other: + c = sympify(self.contains(i)) + if c is not S.true: + not_true.append(i) + return Complement(FiniteSet(*not_true), unk) return Set._complement(self, other) @@ -1857,7 +1875,7 @@ def _union(self, other): return FiniteSet(*(self._elements | other._elements)) # If other set contains one of my elements, remove it from myself - if any(other.contains(x) == True for x in self): + if any(sympify(other.contains(x)) is S.true for x in self): return set(( FiniteSet(*[x for x in self if other.contains(x) != True]), other)) diff --git a/sympy/stats/rv.py b/sympy/stats/rv.py index b3686a1fab..1b04700aec 100644 --- a/sympy/stats/rv.py +++ b/sympy/stats/rv.py @@ -387,7 +387,8 @@ def __contains__(self, other): for domain in 
self.domains: # Collect the parts of this event which associate to this domain elem = frozenset([item for item in other - if domain.symbols.contains(item[0]) == S.true]) + if sympify(domain.symbols.contains(item[0])) + is S.true]) # Test this sub-event if elem not in domain: return False
problems converting an intersection to a list ``` >>> list(Intersection(S.Reals, FiniteSet(x))) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "sympy\sets\sets.py", line 1434, in <genexpr> return (x for x in s if x in other) File "sympy\sets\sets.py", line 516, in __contains__ raise TypeError('contains did not evaluate to a bool: %r' % symb) TypeError: contains did not evaluate to a bool: And(x < oo, x > -oo) ```
sympy/sympy
diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py index cc9065d56c..1afe34c7bb 100644 --- a/sympy/sets/tests/test_sets.py +++ b/sympy/sets/tests/test_sets.py @@ -936,3 +936,8 @@ def test_issue_10113(): assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True)) assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0) assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(S(9)/5, oo)) + + +def test_issue_10248(): + assert list(Intersection(S.Reals, FiniteSet(x))) == [ + And(x < oo, x > -oo)]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 4 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@a9c0fd064c14c355247d6568ee5750ab560c36d4#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/sets/tests/test_sets.py::test_issue_10248" ]
[]
[ "sympy/sets/tests/test_sets.py::test_interval_arguments", "sympy/sets/tests/test_sets.py::test_interval_symbolic_end_points", "sympy/sets/tests/test_sets.py::test_union", "sympy/sets/tests/test_sets.py::test_difference", "sympy/sets/tests/test_sets.py::test_Complement", "sympy/sets/tests/test_sets.py::test_complement", "sympy/sets/tests/test_sets.py::test_intersect", "sympy/sets/tests/test_sets.py::test_intersection", "sympy/sets/tests/test_sets.py::test_issue_9623", "sympy/sets/tests/test_sets.py::test_is_disjoint", "sympy/sets/tests/test_sets.py::test_ProductSet_of_single_arg_is_arg", "sympy/sets/tests/test_sets.py::test_interval_subs", "sympy/sets/tests/test_sets.py::test_interval_to_mpi", "sympy/sets/tests/test_sets.py::test_measure", "sympy/sets/tests/test_sets.py::test_is_subset", "sympy/sets/tests/test_sets.py::test_is_proper_subset", "sympy/sets/tests/test_sets.py::test_is_superset", "sympy/sets/tests/test_sets.py::test_is_proper_superset", "sympy/sets/tests/test_sets.py::test_contains", "sympy/sets/tests/test_sets.py::test_interval_symbolic", "sympy/sets/tests/test_sets.py::test_union_contains", "sympy/sets/tests/test_sets.py::test_is_number", "sympy/sets/tests/test_sets.py::test_Interval_is_left_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_is_right_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_as_relational", "sympy/sets/tests/test_sets.py::test_Finite_as_relational", "sympy/sets/tests/test_sets.py::test_Union_as_relational", "sympy/sets/tests/test_sets.py::test_Intersection_as_relational", "sympy/sets/tests/test_sets.py::test_EmptySet", "sympy/sets/tests/test_sets.py::test_finite_basic", "sympy/sets/tests/test_sets.py::test_powerset", "sympy/sets/tests/test_sets.py::test_product_basic", "sympy/sets/tests/test_sets.py::test_real", "sympy/sets/tests/test_sets.py::test_supinf", "sympy/sets/tests/test_sets.py::test_universalset", "sympy/sets/tests/test_sets.py::test_Union_of_ProductSets_shares", 
"sympy/sets/tests/test_sets.py::test_Interval_free_symbols", "sympy/sets/tests/test_sets.py::test_image_interval", "sympy/sets/tests/test_sets.py::test_image_piecewise", "sympy/sets/tests/test_sets.py::test_image_FiniteSet", "sympy/sets/tests/test_sets.py::test_image_Union", "sympy/sets/tests/test_sets.py::test_image_EmptySet", "sympy/sets/tests/test_sets.py::test_issue_5724_7680", "sympy/sets/tests/test_sets.py::test_boundary", "sympy/sets/tests/test_sets.py::test_boundary_Union", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet_line", "sympy/sets/tests/test_sets.py::test_is_open", "sympy/sets/tests/test_sets.py::test_is_closed", "sympy/sets/tests/test_sets.py::test_closure", "sympy/sets/tests/test_sets.py::test_interior", "sympy/sets/tests/test_sets.py::test_issue_7841", "sympy/sets/tests/test_sets.py::test_Eq", "sympy/sets/tests/test_sets.py::test_SymmetricDifference", "sympy/sets/tests/test_sets.py::test_issue_9536", "sympy/sets/tests/test_sets.py::test_issue_9637", "sympy/sets/tests/test_sets.py::test_issue_9808", "sympy/sets/tests/test_sets.py::test_issue_9956", "sympy/sets/tests/test_sets.py::test_issue_Symbol_inter", "sympy/sets/tests/test_sets.py::test_issue_10113" ]
[]
BSD
334
garnaat__placebo-21
81cb9c0e48f7e6e58e2cdd6414b777b39c846c4b
2015-12-14 17:29:40
81cb9c0e48f7e6e58e2cdd6414b777b39c846c4b
diff --git a/placebo/__init__.py b/placebo/__init__.py index 43e34d6..8dc9470 100644 --- a/placebo/__init__.py +++ b/placebo/__init__.py @@ -15,7 +15,7 @@ from placebo.pill import Pill -def attach(session, data_path, debug=False): - pill = Pill(debug=debug) +def attach(session, data_path, prefix=None, debug=False): + pill = Pill(prefix=prefix, debug=debug) pill.attach(session, data_path) return pill diff --git a/placebo/pill.py b/placebo/pill.py index 472402e..2c0cdd2 100644 --- a/placebo/pill.py +++ b/placebo/pill.py @@ -35,10 +35,11 @@ class Pill(object): clients = [] - def __init__(self, debug=False): + def __init__(self, prefix=None, debug=False): if debug: self._set_logger(__name__, logging.DEBUG) self.filename_re = re.compile(r'.*\..*_(?P<index>\d+).json') + self.prefix = prefix self._uuid = str(uuid.uuid4()) self._data_path = None self._mode = None @@ -52,6 +53,10 @@ class Pill(object): def mode(self): return self._mode + @property + def data_path(self): + return self._data_path + def _set_logger(self, logger_name, level=logging.INFO): """ Convenience function to quickly configure full debug output @@ -178,6 +183,8 @@ class Pill(object): def get_new_file_path(self, service, operation): base_name = '{}.{}'.format(service, operation) + if self.prefix: + base_name = '{}.{}'.format(self.prefix, base_name) LOG.debug('get_new_file_path: %s', base_name) index = 0 glob_pattern = os.path.join(self._data_path, base_name + '*') @@ -194,6 +201,8 @@ class Pill(object): def get_next_file_path(self, service, operation): base_name = '{}.{}'.format(service, operation) + if self.prefix: + base_name = '{}.{}'.format(self.prefix, base_name) LOG.debug('get_next_file_path: %s', base_name) next_file = None while next_file is None:
Optional prefix for saved response filenames I noticed that each time Placebo writes a new response JSON file, it adds a `_<number>` suffix before the `.json`, and simply increments the suffix until it finds one that doesn't exist yet. This is better than overwriting previous runs, but doesn't give any way to distinguish multiple runs. What would be really nice, is if you could optionally set a `prefix` argument on a `Pill` object. This prefix would be applied to all filenames written by the pill, like so: `prefix.iam.ListUserPolicies_1.json`. I can think of two good use cases for this: 1. Define the prefix uniquely on each execution, using a command line argument, timestamp, or some other generated value. Now individual executions can be reloaded and replayed, or even diffed for comparison. 2. For an application that uses multiple Boto sessions, define a different prefix for each session. For example, I can use `base` for my initial credentials session, `mfa` for my new session after MFA authentication, and `<rolename>` for a session created using an IAM role. I can even combine that with a timestamp (as in case 1) to separate multiple runs.
garnaat/placebo
diff --git a/tests/unit/responses/saved/foo.ec2.DescribeAddresses_1.json b/tests/unit/responses/saved/foo.ec2.DescribeAddresses_1.json new file mode 100644 index 0000000..a907bf1 --- /dev/null +++ b/tests/unit/responses/saved/foo.ec2.DescribeAddresses_1.json @@ -0,0 +1,16 @@ +{ + "status_code": 200, + "data": { + "Addresses": [ + { + "PublicIp": "42.43.44.45", + "Domain": "vpc", + "AllocationId": "eipalloc-87654321" + } + ], + "ResponseMetadata": { + "HTTPStatusCode": 200, + "RequestId": "b0fe7bb2-6552-4ea8-8a36-9685044048ab" + } + } +} diff --git a/tests/unit/test_canned.py b/tests/unit/test_canned.py index 913459d..c25905a 100644 --- a/tests/unit/test_canned.py +++ b/tests/unit/test_canned.py @@ -58,3 +58,23 @@ class TestPlacebo(unittest.TestCase): self.assertEqual(len(result['KeyPairs']), 2) self.assertEqual(result['KeyPairs'][0]['KeyName'], 'FooBar') self.assertEqual(result['KeyPairs'][1]['KeyName'], 'FieBaz') + + def test_prefix_new_file_path(self): + self.pill.prefix = 'foo' + service = 'ec2' + operation = 'DescribeAddresses' + filename = '{}.{}.{}_2.json'.format(self.pill.prefix, service, + operation) + target = os.path.join(self.data_path, filename) + self.assertEqual(self.pill.get_new_file_path(service, operation), + target) + + def test_prefix_next_file_path(self): + self.pill.prefix = 'foo' + service = 'ec2' + operation = 'DescribeAddresses' + filename = '{}.{}.{}_1.json'.format(self.pill.prefix, service, + operation) + target = os.path.join(self.data_path, filename) + self.assertEqual(self.pill.get_next_file_path(service, operation), + target)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 2 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "coverage", "pytest" ], "pre_install": null, "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 boto3==1.2.2 botocore==1.3.30 certifi==2021.5.30 coverage==6.2 docutils==0.18.1 importlib-metadata==4.8.3 iniconfig==1.1.1 jmespath==0.10.0 mock==1.3.0 nose==1.3.7 packaging==21.3 pbr==6.1.1 -e git+https://github.com/garnaat/placebo.git@81cb9c0e48f7e6e58e2cdd6414b777b39c846c4b#egg=placebo pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 python-dateutil==2.9.0.post0 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: placebo channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - boto3==1.2.2 - botocore==1.3.30 - coverage==6.2 - docutils==0.18.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jmespath==0.10.0 - mock==1.3.0 - nose==1.3.7 - packaging==21.3 - pbr==6.1.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/placebo
[ "tests/unit/test_canned.py::TestPlacebo::test_prefix_new_file_path", "tests/unit/test_canned.py::TestPlacebo::test_prefix_next_file_path" ]
[]
[ "tests/unit/test_canned.py::TestPlacebo::test_describe_addresses", "tests/unit/test_canned.py::TestPlacebo::test_describe_key_pairs" ]
[]
Apache License 2.0
335
sympy__sympy-10261
4bc92d1fd8cec503d66f8aed30f9348e7c8b08d1
2015-12-15 06:02:42
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
jksuom: I have not studied how this would appear on screen or paper, but does the ``\thinspace`` (``\,``) suffice to separate the numbers visually? Perhaps you could also test with ``\:`` or ``\;`` (``\medspace`` or ``\thickspace``). gxyd: > does the \thinspace (\,) suffice to separate the numbers visually Yes i think it does. Here are the few tests [sympy1.pdf](https://github.com/sympy/sympy/files/62229/sympy1.pdf) In order with "/," , "/:" and "/;" respectively. jksuom: I am not quite convinced by these examples that involve only single digits. What would happen with two-digit numbers? (The line with ``\medspace`` looks also reasonable.) gxyd: Here are some screenshots. I don't think my `LaTex` installation supports `\medspace` or `thickspace`. I tried with spacings mentioned on [this answer](http://tex.stackexchange.com/a/74354/88362) . ![screen_sympy_printing](https://cloud.githubusercontent.com/assets/7712241/11813250/55cc1410-a366-11e5-80f0-401c5b327001.png) I think i like the `\enspace` printing. Since it ensures the adequate spacing. jksuom: ``\enspace`` looks about twice the size of ``\medmuskip`` (given by ``\:`` or ``\>``) on my screen. It seems to me that it is more than adequate. Perhaps ``\medmuskip`` or ``\thickmuskip`` would suffice. gxyd: I don't know how the `\thickmuskip` looks on printing(seems like my `LaTex`distribution does not support it). Though i have changed it to `\thickmuskip`. Plese have a look. jksuom: ``\thinmuskip`` (thin math space), ``\medmuskip`` (medium math space) and ``\thickmuskip`` (thick math space) denote the *size* of the skip (they do not create it). They are defined in the TeXbook as follows. ``\thinskip=3mu`` ``\medskip=4mu plus 2mu minus 4mu`` ``\thickskip=5mu plus 5mu`` Here ``mu`` (math unit) is a unit of length depending on the font in use. (I think that it is usually one eighteenth of ``\em`` that is the size of the letter "m" in the font. The size of ``\enskip`` is one half ``\em``.) 
``\medskip`` can stretch of shrink to adjust for the available space. It can vanish altogether. Hence it may be preferable to use ``\thickskip``. These spaces do not appear explicitly in latex text. Instead, they are invoked by the commands ``\,``, ``\>`` and ``\;`` that are defined as follows (``\mskip`` will create the space). ``\def\,{\mskip\thinskip}`` ``\def\>{\mskip\medskip}`` ``\def\;{\mskip\thickskip}`` gxyd: Thanks for the detailed explanation. I am learning `LaTex` as well these days, though currently only know a few basics. I didn't knew these wide variety of horizontal space commands. Though for the PR, i think is understood you comment and have made the changes. I think they also work ok on my system. jksuom: Looks good. +1
diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py index add4e9535a..041d395729 100644 --- a/sympy/printing/latex.py +++ b/sympy/printing/latex.py @@ -280,6 +280,19 @@ def _print_Add(self, expr, order=None): return tex + def _print_Cycle(self, expr): + from sympy.combinatorics.permutations import Permutation, Cycle + if str(expr) == '()': + return r"\left( \right)" + expr_perm = Permutation(expr).cyclic_form + term_tex = '' + for i in expr_perm: + term_tex += str(i).replace(',', r"\;") + term_tex = term_tex.replace('[', r"\left( ") + term_tex = term_tex.replace(']', r"\right)") + return term_tex + + _print_Permutation = _print_Cycle def _print_Float(self, expr): # Based off of that in StrPrinter
LaTex printing of Cycle Currently ``` >>> from sympy.combinatorics.permutations import Cycle >>> latex(Cycle(1, 2)) '\\left \\{ 1 : 2, \\quad 2 : 1\\right \\}' ``` That's something of latex for dictionary(`dict`). The LaTex should give the latex representation of `(1 2)`. i.e `latex(Cycle(1, 2))` should return the latex representation of `(1 2)`. In IPython Notebook ``` >>> init_printing(use_unicode=True) >>> Cycle(1, 2) {0:0,1:2,2:1} ``` You can also see the discussion [here](https://github.com/sympy/sympy/pull/10183#issuecomment-162313211) on PR #10183 . Changes should be made to file `printing/latex.py`(mentioned by jksuom) This will be an "easy to fix" issue.
sympy/sympy
diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py index f50b103c5e..eae99555e5 100644 --- a/sympy/printing/tests/test_latex.py +++ b/sympy/printing/tests/test_latex.py @@ -28,6 +28,7 @@ from sympy.logic.boolalg import And, Or, Xor from sympy.core.trace import Tr from sympy.core.compatibility import range +from sympy.combinatorics.permutations import Cycle, Permutation x, y, z, t, a, b = symbols('x y z t a b') k, m, n = symbols('k m n', integer=True) @@ -128,6 +129,18 @@ def test_latex_builtins(): assert latex(false) == r'\mathrm{False}' +def test_latex_cycle(): + assert latex(Cycle(1, 2, 4)) == r"\left( 1\; 2\; 4\right)" + assert latex(Cycle(1, 2)(4, 5, 6)) == r"\left( 1\; 2\right)\left( 4\; 5\; 6\right)" + assert latex(Cycle()) == r"\left( \right)" + + +def test_latex_permutation(): + assert latex(Permutation(1, 2, 4)) == r"\left( 1\; 2\; 4\right)" + assert latex(Permutation(1, 2)(4, 5, 6)) == r"\left( 1\; 2\right)\left( 4\; 5\; 6\right)" + assert latex(Permutation()) == r"\left( \right)" + + def test_latex_Float(): assert latex(Float(1.0e100)) == r"1.0 \cdot 10^{100}" assert latex(Float(1.0e-100)) == r"1.0 \cdot 10^{-100}"
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@4bc92d1fd8cec503d66f8aed30f9348e7c8b08d1#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/printing/tests/test_latex.py::test_latex_cycle", "sympy/printing/tests/test_latex.py::test_latex_permutation" ]
[]
[ "sympy/printing/tests/test_latex.py::test_printmethod", "sympy/printing/tests/test_latex.py::test_latex_basic", "sympy/printing/tests/test_latex.py::test_latex_builtins", "sympy/printing/tests/test_latex.py::test_latex_Float", "sympy/printing/tests/test_latex.py::test_latex_symbols", "sympy/printing/tests/test_latex.py::test_latex_functions", "sympy/printing/tests/test_latex.py::test_hyper_printing", "sympy/printing/tests/test_latex.py::test_latex_bessel", "sympy/printing/tests/test_latex.py::test_latex_fresnel", "sympy/printing/tests/test_latex.py::test_latex_brackets", "sympy/printing/tests/test_latex.py::test_latex_indexed", "sympy/printing/tests/test_latex.py::test_latex_derivatives", "sympy/printing/tests/test_latex.py::test_latex_subs", "sympy/printing/tests/test_latex.py::test_latex_integrals", "sympy/printing/tests/test_latex.py::test_latex_sets", "sympy/printing/tests/test_latex.py::test_latex_Range", "sympy/printing/tests/test_latex.py::test_latex_sequences", "sympy/printing/tests/test_latex.py::test_latex_FourierSeries", "sympy/printing/tests/test_latex.py::test_latex_FormalPowerSeries", "sympy/printing/tests/test_latex.py::test_latex_intervals", "sympy/printing/tests/test_latex.py::test_latex_emptyset", "sympy/printing/tests/test_latex.py::test_latex_union", "sympy/printing/tests/test_latex.py::test_latex_symmetric_difference", "sympy/printing/tests/test_latex.py::test_latex_Complement", "sympy/printing/tests/test_latex.py::test_latex_Complexes", "sympy/printing/tests/test_latex.py::test_latex_productset", "sympy/printing/tests/test_latex.py::test_latex_Naturals", "sympy/printing/tests/test_latex.py::test_latex_Naturals0", "sympy/printing/tests/test_latex.py::test_latex_Integers", "sympy/printing/tests/test_latex.py::test_latex_ImageSet", "sympy/printing/tests/test_latex.py::test_latex_ConditionSet", "sympy/printing/tests/test_latex.py::test_latex_ComplexRegion", "sympy/printing/tests/test_latex.py::test_latex_Contains", 
"sympy/printing/tests/test_latex.py::test_latex_sum", "sympy/printing/tests/test_latex.py::test_latex_product", "sympy/printing/tests/test_latex.py::test_latex_limits", "sympy/printing/tests/test_latex.py::test_issue_3568", "sympy/printing/tests/test_latex.py::test_latex", "sympy/printing/tests/test_latex.py::test_latex_dict", "sympy/printing/tests/test_latex.py::test_latex_list", "sympy/printing/tests/test_latex.py::test_latex_rational", "sympy/printing/tests/test_latex.py::test_latex_inverse", "sympy/printing/tests/test_latex.py::test_latex_DiracDelta", "sympy/printing/tests/test_latex.py::test_latex_Heaviside", "sympy/printing/tests/test_latex.py::test_latex_KroneckerDelta", "sympy/printing/tests/test_latex.py::test_latex_LeviCivita", "sympy/printing/tests/test_latex.py::test_mode", "sympy/printing/tests/test_latex.py::test_latex_Piecewise", "sympy/printing/tests/test_latex.py::test_latex_Matrix", "sympy/printing/tests/test_latex.py::test_latex_matrix_with_functions", "sympy/printing/tests/test_latex.py::test_latex_mul_symbol", "sympy/printing/tests/test_latex.py::test_latex_issue_4381", "sympy/printing/tests/test_latex.py::test_latex_issue_4576", "sympy/printing/tests/test_latex.py::test_latex_pow_fraction", "sympy/printing/tests/test_latex.py::test_noncommutative", "sympy/printing/tests/test_latex.py::test_latex_order", "sympy/printing/tests/test_latex.py::test_latex_Lambda", "sympy/printing/tests/test_latex.py::test_latex_PolyElement", "sympy/printing/tests/test_latex.py::test_latex_FracElement", "sympy/printing/tests/test_latex.py::test_latex_Poly", "sympy/printing/tests/test_latex.py::test_latex_RootOf", "sympy/printing/tests/test_latex.py::test_latex_RootSum", "sympy/printing/tests/test_latex.py::test_settings", "sympy/printing/tests/test_latex.py::test_latex_numbers", "sympy/printing/tests/test_latex.py::test_lamda", "sympy/printing/tests/test_latex.py::test_custom_symbol_names", "sympy/printing/tests/test_latex.py::test_matAdd", 
"sympy/printing/tests/test_latex.py::test_matMul", "sympy/printing/tests/test_latex.py::test_latex_MatrixSlice", "sympy/printing/tests/test_latex.py::test_latex_RandomDomain", "sympy/printing/tests/test_latex.py::test_PrettyPoly", "sympy/printing/tests/test_latex.py::test_integral_transforms", "sympy/printing/tests/test_latex.py::test_PolynomialRingBase", "sympy/printing/tests/test_latex.py::test_categories", "sympy/printing/tests/test_latex.py::test_Modules", "sympy/printing/tests/test_latex.py::test_QuotientRing", "sympy/printing/tests/test_latex.py::test_Tr", "sympy/printing/tests/test_latex.py::test_Adjoint", "sympy/printing/tests/test_latex.py::test_Hadamard", "sympy/printing/tests/test_latex.py::test_ZeroMatrix", "sympy/printing/tests/test_latex.py::test_boolean_args_order", "sympy/printing/tests/test_latex.py::test_imaginary", "sympy/printing/tests/test_latex.py::test_builtins_without_args", "sympy/printing/tests/test_latex.py::test_latex_greek_functions", "sympy/printing/tests/test_latex.py::test_translate", "sympy/printing/tests/test_latex.py::test_other_symbols", "sympy/printing/tests/test_latex.py::test_modifiers", "sympy/printing/tests/test_latex.py::test_greek_symbols", "sympy/printing/tests/test_latex.py::test_builtin_no_args", "sympy/printing/tests/test_latex.py::test_issue_6853", "sympy/printing/tests/test_latex.py::test_Mul", "sympy/printing/tests/test_latex.py::test_Pow", "sympy/printing/tests/test_latex.py::test_issue_7180", "sympy/printing/tests/test_latex.py::test_issue_8409", "sympy/printing/tests/test_latex.py::test_issue_8470", "sympy/printing/tests/test_latex.py::test_issue_7117", "sympy/printing/tests/test_latex.py::test_issue_2934" ]
[]
BSD
336
sympy__sympy-10269
4bc92d1fd8cec503d66f8aed30f9348e7c8b08d1
2015-12-16 13:56:22
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/categories/baseclasses.py b/sympy/categories/baseclasses.py index 79b12d08da..9b3d68937a 100644 --- a/sympy/categories/baseclasses.py +++ b/sympy/categories/baseclasses.py @@ -1,6 +1,6 @@ from __future__ import print_function, division -from sympy.core import S, Basic, Dict, Symbol, Tuple, sympify +from sympy.core import S, Basic, Dict, Symbol, Tuple from sympy.core.compatibility import range, iterable from sympy.sets import Set, FiniteSet, EmptySet @@ -722,8 +722,8 @@ def __new__(cls, *args): for morphism in conclusions_arg: # Check that no new objects appear in conclusions. - if ((sympify(objects.contains(morphism.domain)) is S.true) and - (sympify(objects.contains(morphism.codomain)) is S.true)): + if ((objects.contains(morphism.domain) == S.true) and + (objects.contains(morphism.codomain) == S.true)): # No need to add identities and recurse # composites this time. Diagram._add_morphism_closure( @@ -920,14 +920,14 @@ def subdiagram_from_objects(self, objects): new_premises = {} for morphism, props in self.premises.items(): - if ((sympify(objects.contains(morphism.domain)) is S.true) and - (sympify(objects.contains(morphism.codomain)) is S.true)): + if ((objects.contains(morphism.domain) == S.true) and + (objects.contains(morphism.codomain) == S.true)): new_premises[morphism] = props new_conclusions = {} for morphism, props in self.conclusions.items(): - if ((sympify(objects.contains(morphism.domain)) is S.true) and - (sympify(objects.contains(morphism.codomain)) is S.true)): + if ((objects.contains(morphism.domain) == S.true) and + (objects.contains(morphism.codomain) == S.true)): new_conclusions[morphism] = props return Diagram(new_premises, new_conclusions) diff --git a/sympy/combinatorics/named_groups.py b/sympy/combinatorics/named_groups.py index 6febe32ee8..1c5ddb768c 100644 --- a/sympy/combinatorics/named_groups.py +++ b/sympy/combinatorics/named_groups.py @@ -13,8 +13,9 @@ def AbelianGroup(*cyclic_orders): Returns the direct product of 
cyclic groups with the given orders. According to the structure theorem for finite abelian groups ([1]), - every finite abelian group can be written as the direct product of - finitely many cyclic groups. + every finite abelian group can be written as the direct product of finitely + many cyclic groups. + [1] http://groupprops.subwiki.org/wiki/Structure_theorem_for_finitely_generated_abelian_groups Examples ======== @@ -26,19 +27,12 @@ def AbelianGroup(*cyclic_orders): PermutationGroup([ (6)(0 1 2), (3 4 5 6)]) - >>> _.is_group - True + >>> _.is_group() + False See Also ======== - DirectProduct - - References - ========== - - [1] http://groupprops.subwiki.org/wiki/Structure_theorem_for_finitely_generated_abelian_groups - """ groups = [] degree = 0 @@ -70,8 +64,8 @@ def AlternatingGroup(n): >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> G = AlternatingGroup(4) - >>> G.is_group - True + >>> G.is_group() + False >>> a = list(G.generate_dimino()) >>> len(a) 12 @@ -139,8 +133,8 @@ def CyclicGroup(n): >>> from sympy.combinatorics.named_groups import CyclicGroup >>> G = CyclicGroup(6) - >>> G.is_group - True + >>> G.is_group() + False >>> G.order() 6 >>> list(G.generate_schreier_sims(af=True)) @@ -184,8 +178,8 @@ def DihedralGroup(n): >>> from sympy.combinatorics.named_groups import DihedralGroup >>> G = DihedralGroup(5) - >>> G.is_group - True + >>> G.is_group() + False >>> a = list(G.generate_dimino()) >>> [perm.cyclic_form for perm in a] [[], [[0, 1, 2, 3, 4]], [[0, 2, 4, 1, 3]], @@ -245,8 +239,8 @@ def SymmetricGroup(n): >>> from sympy.combinatorics.named_groups import SymmetricGroup >>> G = SymmetricGroup(4) - >>> G.is_group - True + >>> G.is_group() + False >>> G.order() 24 >>> list(G.generate_schreier_sims(af=True)) @@ -299,8 +293,8 @@ def RubikGroup(n): """Return a group of Rubik's cube generators >>> from sympy.combinatorics.named_groups import RubikGroup - >>> RubikGroup(2).is_group - True + >>> RubikGroup(2).is_group() + False """ from 
sympy.combinatorics.generators import rubik if n <= 1: diff --git a/sympy/combinatorics/perm_groups.py b/sympy/combinatorics/perm_groups.py index 720abd62d3..f999efcbd7 100644 --- a/sympy/combinatorics/perm_groups.py +++ b/sympy/combinatorics/perm_groups.py @@ -16,7 +16,6 @@ from sympy.ntheory import sieve from sympy.utilities.iterables import has_variety, is_sequence, uniq from sympy.utilities.randtest import _randrange -from itertools import islice rmul = Permutation.rmul_with_af _af_new = Permutation._af_new @@ -117,7 +116,6 @@ class PermutationGroup(Basic): [13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf """ - is_group = True def __new__(cls, *args, **kwargs): """The default constructor. Accepts Cycle and Permutation forms. @@ -168,29 +166,11 @@ def __new__(cls, *args, **kwargs): def __getitem__(self, i): return self._generators[i] - def __contains__(self, i): - """Return True if `i` is contained in PermutationGroup. - - Examples - ======== - - >>> from sympy.combinatorics import Permutation, PermutationGroup - >>> p = Permutation(1, 2, 3) - >>> Permutation(3) in PermutationGroup(p) - True - - """ - if not isinstance(i, Permutation): - raise TypeError("A PermutationGroup contains only Permutations as " - "elements, not elements of type %s" % type(i)) - return self.contains(i) - def __len__(self): return len(self._generators) def __eq__(self, other): - """Return True if PermutationGroup generated by elements in the - group are same i.e they represent the same PermutationGroup. + """Return True if self and other have the same generators. Examples ======== @@ -208,25 +188,7 @@ def __eq__(self, other): """ if not isinstance(other, PermutationGroup): return False - - set_self_gens = set(self.generators) - set_other_gens = set(other.generators) - - # before reaching the general case there are also certain - # optimisation and obvious cases requiring less or no actual - # computation. 
- if set_self_gens == set_other_gens: - return True - - # in the most general case it will check that each generator of - # one group belongs to the other PermutationGroup and vice-versa - for gen1 in set_self_gens: - if not other.contains(gen1): - return False - for gen2 in set_other_gens: - if not self.contains(gen2): - return False - return True + return set(self.generators) == set(other.generators) def __hash__(self): return super(PermutationGroup, self).__hash__() @@ -1059,18 +1021,6 @@ def degree(self): """ return self._degree - @property - def elements(self): - """Returns all the elements of the permutatio group in - a list - - Examples - ======== - - >>> from sympy.combinatorics import Permutation - """ - return set(list(islice(self.generate(), None))) - def derived_series(self): r"""Return the derived series for the group. @@ -1182,14 +1132,14 @@ def generate(self, method="coset", af=False): >>> from sympy.combinatorics import PermutationGroup >>> from sympy.combinatorics.polyhedron import tetrahedron - The permutation group given in the tetrahedron object is also + The permutation group given in the tetrahedron object is not true groups: >>> G = tetrahedron.pgroup - >>> G.is_group - True + >>> G.is_group() + False - Also the group generated by the permutations in the tetrahedron + But the group generated by the permutations in the tetrahedron pgroup -- even the first two -- is a proper group: >>> H = PermutationGroup(G[0], G[1]) @@ -1207,7 +1157,7 @@ def generate(self, method="coset", af=False): (0 3 2), (3)(0 1 2), (0 2)(1 3)]) - >>> _.is_group + >>> _.is_group() True """ if method == "coset": @@ -3153,6 +3103,101 @@ def transitivity_degree(self): else: return self._transitivity_degree + def is_group(self): + """Return True if the group meets three criteria: identity is present, + the inverse of every element is also an element, and the product of + any two elements is also an element. If any of the tests fail, False + is returned. 
+ + Examples + ======== + + >>> from sympy.combinatorics import Permutation + >>> Permutation.print_cyclic = True + >>> from sympy.combinatorics import PermutationGroup + >>> from sympy.combinatorics.polyhedron import tetrahedron + + The permutation group given in the tetrahedron object is not + a true group: + + >>> G = tetrahedron.pgroup + >>> G.is_group() + False + + But the group generated by the permutations in the tetrahedron + pgroup is a proper group: + + >>> H = PermutationGroup(list(G.generate())) + >>> H.is_group() + True + + The identity permutation is present: + + >>> H.has(Permutation(G.degree - 1)) + True + + The product of any two elements from the group is also in the group: + + >>> from sympy import TableForm + >>> g = list(H) + >>> n = len(g) + >>> m = [] + >>> for i in g: + ... m.append([g.index(i*H) for H in g]) + ... + >>> TableForm(m, headings=[range(n), range(n)], wipe_zeros=False) + | 0 1 2 3 4 5 6 7 8 9 10 11 + ---------------------------------------- + 0 | 11 0 8 10 6 2 7 4 5 3 9 1 + 1 | 0 1 2 3 4 5 6 7 8 9 10 11 + 2 | 6 2 7 4 5 3 9 1 11 0 8 10 + 3 | 5 3 9 1 11 0 8 10 6 2 7 4 + 4 | 3 4 0 2 10 6 11 8 9 7 1 5 + 5 | 4 5 6 7 8 9 10 11 0 1 2 3 + 6 | 10 6 11 8 9 7 1 5 3 4 0 2 + 7 | 9 7 1 5 3 4 0 2 10 6 11 8 + 8 | 7 8 4 6 2 10 3 0 1 11 5 9 + 9 | 8 9 10 11 0 1 2 3 4 5 6 7 + 10 | 2 10 3 0 1 11 5 9 7 8 4 6 + 11 | 1 11 5 9 7 8 4 6 2 10 3 0 + >>> + The entries in the table give the element in the group corresponding + to the product of a given column element and row element: + + >>> g[3]*g[2] == g[9] + True + + The inverse of every element is also in the group: + + >>> TableForm([[g.index(~gi) for gi in g]], headings=[[], range(n)], + ... wipe_zeros=False) + 0 1 2 3 4 5 6 7 8 9 10 11 + --------------------------- + 11 1 7 3 10 9 6 2 8 5 4 0 + + So we see that g[1] and g[3] are equivalent to their inverse while + g[7] == ~g[2]. 
+ """ + # identity present + I = Permutation(size=self.degree) + for g in self: + if g == I: + break + else: + return False + + # associativity already holds: a*(b*c) == (a*b)*c for permutations + + # inverse of each is present + if not all(self.has(~a) for a in self): + return False + + # closure + for a in self: + for b in self: + if not self.has(a*b): + return False + return True def _orbit(degree, generators, alpha, action='tuples'): r"""Compute the orbit of alpha ``\{g(\alpha) | g \in G\}`` as a set. diff --git a/sympy/combinatorics/polyhedron.py b/sympy/combinatorics/polyhedron.py index d4ba35609f..3cb730aefc 100644 --- a/sympy/combinatorics/polyhedron.py +++ b/sympy/combinatorics/polyhedron.py @@ -637,9 +637,9 @@ def _pgroup_calcs(): ... tetrahedron, cube, octahedron, dodecahedron, icosahedron) ... >>> polyhedra = (tetrahedron, cube, octahedron, dodecahedron, icosahedron) - >>> [h.pgroup.is_group for h in polyhedra] + >>> [h.pgroup.is_group() for h in polyhedra] ... - [True, True, True, True, True] + [False, False, False, False, False] Although tests in polyhedron's test suite check that powers of the permutations in the groups generate all permutations of the vertices @@ -656,7 +656,7 @@ def _pgroup_calcs(): ... perms.add(p) ... ... perms = [Permutation(p) for p in perms] - ... assert PermutationGroup(perms).is_group + ... assert PermutationGroup(perms).is_group() In addition to doing the above, the tests in the suite confirm that the faces are all present after the application of each permutation. 
diff --git a/sympy/core/function.py b/sympy/core/function.py index bca7628304..2c2f8e9f4a 100644 --- a/sympy/core/function.py +++ b/sympy/core/function.py @@ -646,23 +646,14 @@ def fdiff(self, argindex=1): """ if not (1 <= argindex <= len(self.args)): raise ArgumentIndexError(self, argindex) - - if self.args[argindex - 1].is_Symbol: - for i in range(len(self.args)): - if i == argindex - 1: - continue - # See issue 8510 - if self.args[argindex - 1] in self.args[i].free_symbols: - break - else: - return Derivative(self, self.args[argindex - 1], evaluate=False) - # See issue 4624 and issue 4719 and issue 5600 - arg_dummy = Dummy('xi_%i' % argindex) - arg_dummy.dummy_index = hash(self.args[argindex - 1]) - new_args = [arg for arg in self.args] - new_args[argindex-1] = arg_dummy - return Subs(Derivative(self.func(*new_args), arg_dummy), - arg_dummy, self.args[argindex - 1]) + if not self.args[argindex - 1].is_Symbol: + # See issue 4624 and issue 4719 and issue 5600 + arg_dummy = Dummy('xi_%i' % argindex) + arg_dummy.dummy_index = hash(self.args[argindex - 1]) + return Subs(Derivative( + self.subs(self.args[argindex - 1], arg_dummy), + arg_dummy), arg_dummy, self.args[argindex - 1]) + return Derivative(self, self.args[argindex - 1], evaluate=False) def _eval_as_leading_term(self, x): """Stub that should be overridden by new Functions to return diff --git a/sympy/matrices/matrices.py b/sympy/matrices/matrices.py index 3a248551eb..7cf41cc890 100644 --- a/sympy/matrices/matrices.py +++ b/sympy/matrices/matrices.py @@ -3617,7 +3617,6 @@ def _jordan_block_structure(self): # `a_0` is `dim(Kernel(Ms[0]) = dim (Kernel(I)) = 0` since `I` is regular l_jordan_chains={} - chain_vectors=[] Ms = [I] Ns = [[]] a = [0] @@ -3673,41 +3672,34 @@ def _jordan_block_structure(self): for s in reversed(range(1, smax+1)): S = Ms[s] - # We want the vectors in `Kernel((self-lI)^s)` (**), - # but without those in `Kernel(self-lI)^s-1` so we will add these as additional equations - # to the system 
formed by `S` (`S` will no longer be quadratic but this does no harm - # since `S` is rank deficient). + # We want the vectors in `Kernel((self-lI)^s)`, + # but without those in `Kernel(self-lI)^s-1` + # so we will add their adjoints as additional equations + # to the system formed by `S` to get the orthogonal + # complement. + # (`S` will no longer be quadratic.) + exclude_vectors = Ns[s-1] for k in range(0, a[s-1]): S = S.col_join((exclude_vectors[k]).adjoint()) - # We also want to exclude the vectors in the chains for the bigger blocks + + # We also want to exclude the vectors + # in the chains for the bigger blocks # that we have already computed (if there are any). # (That is why we start with the biggest s). - ######## Implementation remark: ######## - - # Doing so for *ALL* already computed chain vectors - # we actually exclude some vectors twice because they are already excluded - # by the condition (**). - # This happens if there are more than one blocks attached to the same eigenvalue *AND* - # the current blocksize is smaller than the block whose chain vectors we exclude. - # If the current block has size `s_i` and the next bigger block has size `s_i-1` then - # the first `s_i-s_i-1` chainvectors of the bigger block are already excluded by (**). - # The unnecassary adding of these equations could be avoided if the algorithm would - # take into account the lengths of the already computed chains which are already stored - # and add only the last `s` items. - # However the following loop would be a good deal more nested to do so. - # Since adding a linear dependent equation does not change the result, - # it can harm only in terms of efficiency. - # So to be sure I left it there for the moment. 
- - l = len(chain_vectors) - if l > 0: - for k in range(0, l): - old = chain_vectors[k].adjoint() - S = S.col_join(old) + # Since Jordan blocks are not orthogonal in general + # (in the original space), only those chain vectors + # that are on level s (index `s-1` in a chain) + # are added. + + for chain_list in l_jordan_chains.values(): + for chain in chain_list: + S = S.col_join(chain[s-1].adjoint()) + e0s = S.nullspace() - # Determine the number of chain leaders which equals the number of blocks with that size. + # Determine the number of chain leaders + # for blocks of size `s`. n_e0 = len(e0s) s_chains = [] # s_cells=[] @@ -3719,7 +3711,6 @@ def _jordan_block_structure(self): # We want the chain leader appear as the last of the block. chain.reverse() - chain_vectors += chain s_chains.append(chain) l_jordan_chains[s] = s_chains jordan_block_structures[eigenval] = l_jordan_chains diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index ac7a486dba..3b0ba18a62 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -511,8 +511,8 @@ def __sub__(self, other): return Complement(self, other) def __contains__(self, other): - symb = sympify(self.contains(other)) - if not (symb is S.true or symb is S.false): + symb = self.contains(other) + if symb not in (True, False): raise TypeError('contains did not evaluate to a bool: %r' % symb) return bool(symb) @@ -949,8 +949,8 @@ def _union(self, other): return Interval(start, end, left_open, right_open) # If I have open end points and these endpoints are contained in other - if ((self.left_open and sympify(other.contains(self.start)) is S.true) or - (self.right_open and sympify(other.contains(self.end)) is S.true)): + if ((self.left_open and other.contains(self.start) == True) or + (self.right_open and other.contains(self.end) == True)): # Fill in my end points and return open_left = self.left_open and self.start not in other open_right = self.right_open and self.end not in other @@ -1427,23 +1427,13 @@ def _contains(self, 
other): return And(*[set.contains(other) for set in self.args]) def __iter__(self): - no_iter = True for s in self.args: if s.is_iterable: - no_iter = False other_sets = set(self.args) - set((s,)) other = Intersection(other_sets, evaluate=False) - for x in s: - c = sympify(other.contains(x)) - if c is S.true: - yield x - elif c is S.false: - pass - else: - yield c + return (x for x in s if x in other) - if no_iter: - raise ValueError("None of the constituent sets are iterable") + raise ValueError("None of the constituent sets are iterable") @staticmethod def _handle_finite_sets(args): @@ -1495,7 +1485,7 @@ def _handle_finite_sets(args): # contained in `v` then remove them from `v` # and add this as a new arg contained = [x for x in symbolic_s_list - if sympify(v.contains(x)) is S.true] + if v.contains(x) == True] if contained != symbolic_s_list: new_args.append( v - FiniteSet( @@ -1847,20 +1837,12 @@ def _complement(self, other): return None elif isinstance(other, FiniteSet): - unk = [] - for i in self: - c = sympify(other.contains(i)) - if c is not S.true and c is not S.false: - unk.append(i) - unk = FiniteSet(*unk) + unk = FiniteSet(*[el for el in self if other.contains(el) + not in (True, False)]) if unk == self: return - not_true = [] - for i in other: - c = sympify(self.contains(i)) - if c is not S.true: - not_true.append(i) - return Complement(FiniteSet(*not_true), unk) + return Complement(FiniteSet(*[el for el in other if + self.contains(el) != True]), unk) return Set._complement(self, other) @@ -1875,7 +1857,7 @@ def _union(self, other): return FiniteSet(*(self._elements | other._elements)) # If other set contains one of my elements, remove it from myself - if any(sympify(other.contains(x)) is S.true for x in self): + if any(other.contains(x) == True for x in self): return set(( FiniteSet(*[x for x in self if other.contains(x) != True]), other)) diff --git a/sympy/stats/rv.py b/sympy/stats/rv.py index 1b04700aec..b3686a1fab 100644 --- a/sympy/stats/rv.py 
+++ b/sympy/stats/rv.py @@ -387,8 +387,7 @@ def __contains__(self, other): for domain in self.domains: # Collect the parts of this event which associate to this domain elem = frozenset([item for item in other - if sympify(domain.symbols.contains(item[0])) - is S.true]) + if domain.symbols.contains(item[0]) == S.true]) # Test this sub-event if elem not in domain: return False
Matrix.jordan_cells() fails I have SymPy 0.7.7.dev (Python 2.7.6-64-bit) and the following **matrix**: ```python M = Matrix([ ...: [1, 0, 0, 1], ...: [0, 1, 1, 0], ...: [0, 0, 1, 1], ...: [0, 0, 0, 1]]) ``` A 4x4 matrix, not very spectacular. However, the following computation **fails**: ```python M.jordan_cells() ``` Here is what my interpreter says: >/usr/local/lib/python2.7/dist-packages/sympy-0.7.7.dev-py2.7.egg/sympy/matrices/matrices.pyc in jordan_cells(self, calc_transformation) > 3827 P = MutableMatrix.zeros(n) > 3828 for j in range(0, n): >-> 3829 P[:, j] = Pcols_new[j] > 3830 > 3831 return type(self)(P), Jcells >IndexError: list index out of range I believe that the Jordan normal form **can be** computed for this matrix. So I guess I fell in a corner case. The online engine [WolframAlpha](http://www.wolframalpha.com/input/?i=jordan+normal+form+calculator&f1={{1%2C+0%2C+0%2C+1}%2C+{0%2C+1%2C+1%2C+0}%2C+{0%2C+0%2C+1%2C+1}%2C+{0%2C+0%2C+0%2C+1}}&f=JordanDecompositionCalculator.theMatrix_{{1%2C+0%2C+0%2C+1}%2C+{0%2C+1%2C+1%2C+0}%2C+{0%2C+0%2C+1%2C+1}%2C+{0%2C+0%2C+0%2C+1}}) finds a solution. Can anyone help or fix? [jordan_cells_failure.txt](https://github.com/sympy/sympy/files/57273/jordan_cells_failure.txt)
sympy/sympy
diff --git a/sympy/combinatorics/tests/test_perm_groups.py b/sympy/combinatorics/tests/test_perm_groups.py index d8f7118cda..13dbcf2359 100644 --- a/sympy/combinatorics/tests/test_perm_groups.py +++ b/sympy/combinatorics/tests/test_perm_groups.py @@ -8,7 +8,6 @@ from sympy.combinatorics.polyhedron import tetrahedron as Tetra, cube from sympy.combinatorics.testutil import _verify_bsgs, _verify_centralizer,\ _verify_normal_closure -from sympy.utilities.pytest import raises rmul = Permutation.rmul @@ -62,20 +61,6 @@ def test_order(): assert g.order() == 1814400 -def test_equality(): - p_1 = Permutation(0, 1, 3) - p_2 = Permutation(0, 2, 3) - p_3 = Permutation(0, 1, 2) - p_4 = Permutation(0, 1, 3) - g_1 = PermutationGroup(p_1, p_2) - g_2 = PermutationGroup(p_3, p_4) - g_3 = PermutationGroup(p_2, p_1) - - assert g_1 == g_2 - assert g_1.generators != g_2.generators - assert g_1 == g_3 - - def test_stabilizer(): S = SymmetricGroup(2) H = S.stabilizer(0) @@ -210,7 +195,6 @@ def test_coset_factor(): d = Permutation([1, 0, 2, 3, 4, 5]) assert not g.coset_factor(d.array_form) assert not g.contains(d) - assert Permutation(2) in G c = Permutation([1, 0, 2, 3, 5, 4]) v = g.coset_factor(c, True) tr = g.basic_transversals @@ -707,13 +691,3 @@ def test_make_perm(): Permutation([4, 7, 6, 5, 0, 3, 2, 1]) assert cube.pgroup.make_perm(7, seed=list(range(7))) == \ Permutation([6, 7, 3, 2, 5, 4, 0, 1]) - - -def test_elements(): - p = Permutation(2, 3) - assert PermutationGroup(p).elements == set([Permutation(3), Permutation(2, 3)]) - - -def test_is_group(): - assert PermutationGroup(Permutation(1,2), Permutation(2,4)).is_group == True - assert SymmetricGroup(4).is_group == True diff --git a/sympy/combinatorics/tests/test_polyhedron.py b/sympy/combinatorics/tests/test_polyhedron.py index 1b842b043e..6e55499c46 100644 --- a/sympy/combinatorics/tests/test_polyhedron.py +++ b/sympy/combinatorics/tests/test_polyhedron.py @@ -75,7 +75,7 @@ def check(h, size, rpt, target): f = [[c[i] for i in 
f] for f in P.faces] assert h.faces == Polyhedron(c, f).faces assert len(got) == target - assert PermutationGroup([Permutation(g) for g in got]).is_group + assert PermutationGroup([Permutation(g) for g in got]).is_group() for h, size, rpt, target in zip( (tetrahedron, square, octahedron, dodecahedron, icosahedron), diff --git a/sympy/core/tests/test_function.py b/sympy/core/tests/test_function.py index 96a9514932..86c0b6a723 100644 --- a/sympy/core/tests/test_function.py +++ b/sympy/core/tests/test_function.py @@ -289,12 +289,6 @@ def test_deriv1(): assert f(3*sin(x)).diff(x) == 3*cos(x)*Subs(Derivative(f(x), x), Tuple(x), Tuple(3*sin(x))) - # See issue 8510 - assert f(x, x + z).diff(x) == Subs(Derivative(f(y, x + z), y), Tuple(y), Tuple(x)) \ - + Subs(Derivative(f(x, y), y), Tuple(y), Tuple(x + z)) - assert f(x, x**2).diff(x) == Subs(Derivative(f(y, x**2), y), Tuple(y), Tuple(x)) \ - + 2*x*Subs(Derivative(f(x, y), y), Tuple(y), Tuple(x**2)) - def test_deriv2(): assert (x**3).diff(x) == 3*x**2 diff --git a/sympy/matrices/tests/test_matrices.py b/sympy/matrices/tests/test_matrices.py index dd1bf13efc..61bc128932 100644 --- a/sympy/matrices/tests/test_matrices.py +++ b/sympy/matrices/tests/test_matrices.py @@ -1621,6 +1621,19 @@ def test_jordan_form_complex_issue_9274(): assert J == Jmust1 or J == Jmust2 assert simplify(P*J*P.inv()) == A +def test_issue_10220(): + # two non-orthogonal Jordan blocks with eigenvalue 1 + M = Matrix([[1, 0, 0, 1], + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1]]) + P, C = M.jordan_cells() + assert P == Matrix([[0, 1, 0, 1], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]]) + assert len(C) == 2 + def test_Matrix_berkowitz_charpoly(): UA, K_i, K_w = symbols('UA K_i K_w') diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py index 1afe34c7bb..cc9065d56c 100644 --- a/sympy/sets/tests/test_sets.py +++ b/sympy/sets/tests/test_sets.py @@ -936,8 +936,3 @@ def test_issue_10113(): assert imageset(x, f, S.Reals) == 
Union(Interval(-oo, 0), Interval(1, oo, True, True)) assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0) assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(S(9)/5, oo)) - - -def test_issue_10248(): - assert list(Intersection(S.Reals, FiniteSet(x))) == [ - And(x < oo, x > -oo)]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 8 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 coverage==6.2 execnet==1.9.0 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 -e git+https://github.com/sympy/sympy.git@4bc92d1fd8cec503d66f8aed30f9348e7c8b08d1#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - execnet==1.9.0 - mpmath==1.3.0 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - tomli==1.2.3 prefix: /opt/conda/envs/sympy
[ "sympy/combinatorics/tests/test_polyhedron.py::test_polyhedron", "sympy/matrices/tests/test_matrices.py::test_issue_10220" ]
[]
[ "sympy/combinatorics/tests/test_perm_groups.py::test_has", "sympy/combinatorics/tests/test_perm_groups.py::test_generate", "sympy/combinatorics/tests/test_perm_groups.py::test_order", "sympy/combinatorics/tests/test_perm_groups.py::test_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_center", "sympy/combinatorics/tests/test_perm_groups.py::test_centralizer", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_rank", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_factor", "sympy/combinatorics/tests/test_perm_groups.py::test_orbits", "sympy/combinatorics/tests/test_perm_groups.py::test_is_normal", "sympy/combinatorics/tests/test_perm_groups.py::test_eq", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_subgroup", "sympy/combinatorics/tests/test_perm_groups.py::test_is_solvable", "sympy/combinatorics/tests/test_perm_groups.py::test_rubik1", "sympy/combinatorics/tests/test_perm_groups.py::test_direct_product", "sympy/combinatorics/tests/test_perm_groups.py::test_orbit_rep", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_vector", "sympy/combinatorics/tests/test_perm_groups.py::test_random_pr", "sympy/combinatorics/tests/test_perm_groups.py::test_is_alt_sym", "sympy/combinatorics/tests/test_perm_groups.py::test_minimal_block", "sympy/combinatorics/tests/test_perm_groups.py::test_max_div", "sympy/combinatorics/tests/test_perm_groups.py::test_is_primitive", "sympy/combinatorics/tests/test_perm_groups.py::test_random_stab", "sympy/combinatorics/tests/test_perm_groups.py::test_transitivity_degree", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_random", "sympy/combinatorics/tests/test_perm_groups.py::test_baseswap", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_incremental", "sympy/combinatorics/tests/test_perm_groups.py::test_subgroup_search", "sympy/combinatorics/tests/test_perm_groups.py::test_normal_closure", 
"sympy/combinatorics/tests/test_perm_groups.py::test_derived_series", "sympy/combinatorics/tests/test_perm_groups.py::test_lower_central_series", "sympy/combinatorics/tests/test_perm_groups.py::test_commutator", "sympy/combinatorics/tests/test_perm_groups.py::test_is_nilpotent", "sympy/combinatorics/tests/test_perm_groups.py::test_is_trivial", "sympy/combinatorics/tests/test_perm_groups.py::test_pointwise_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_make_perm", "sympy/core/tests/test_function.py::test_f_expand_complex", "sympy/core/tests/test_function.py::test_bug1", "sympy/core/tests/test_function.py::test_general_function", "sympy/core/tests/test_function.py::test_derivative_subs_bug", "sympy/core/tests/test_function.py::test_derivative_subs_self_bug", "sympy/core/tests/test_function.py::test_derivative_linearity", "sympy/core/tests/test_function.py::test_derivative_evaluate", "sympy/core/tests/test_function.py::test_diff_symbols", "sympy/core/tests/test_function.py::test_Function", "sympy/core/tests/test_function.py::test_nargs", "sympy/core/tests/test_function.py::test_Lambda", "sympy/core/tests/test_function.py::test_IdentityFunction", "sympy/core/tests/test_function.py::test_Lambda_symbols", "sympy/core/tests/test_function.py::test_Lambda_arguments", "sympy/core/tests/test_function.py::test_Lambda_equality", "sympy/core/tests/test_function.py::test_Subs", "sympy/core/tests/test_function.py::test_expand_function", "sympy/core/tests/test_function.py::test_function_comparable", "sympy/core/tests/test_function.py::test_deriv1", "sympy/core/tests/test_function.py::test_deriv2", "sympy/core/tests/test_function.py::test_func_deriv", "sympy/core/tests/test_function.py::test_suppressed_evaluation", "sympy/core/tests/test_function.py::test_function_evalf", "sympy/core/tests/test_function.py::test_extensibility_eval", "sympy/core/tests/test_function.py::test_function_non_commutative", "sympy/core/tests/test_function.py::test_function_complex", 
"sympy/core/tests/test_function.py::test_function__eval_nseries", "sympy/core/tests/test_function.py::test_doit", "sympy/core/tests/test_function.py::test_evalf_default", "sympy/core/tests/test_function.py::test_issue_5399", "sympy/core/tests/test_function.py::test_derivative_numerically", "sympy/core/tests/test_function.py::test_fdiff_argument_index_error", "sympy/core/tests/test_function.py::test_deriv_wrt_function", "sympy/core/tests/test_function.py::test_diff_wrt_value", "sympy/core/tests/test_function.py::test_diff_wrt", "sympy/core/tests/test_function.py::test_diff_wrt_func_subs", "sympy/core/tests/test_function.py::test_diff_wrt_not_allowed", "sympy/core/tests/test_function.py::test_klein_gordon_lagrangian", "sympy/core/tests/test_function.py::test_sho_lagrangian", "sympy/core/tests/test_function.py::test_straight_line", "sympy/core/tests/test_function.py::test_sort_variable", "sympy/core/tests/test_function.py::test_unhandled", "sympy/core/tests/test_function.py::test_issue_4711", "sympy/core/tests/test_function.py::test_nfloat", "sympy/core/tests/test_function.py::test_issue_7068", "sympy/core/tests/test_function.py::test_issue_7231", "sympy/core/tests/test_function.py::test_issue_7687", "sympy/core/tests/test_function.py::test_issue_7688", "sympy/core/tests/test_function.py::test_mexpand", "sympy/core/tests/test_function.py::test_issue_8469", "sympy/core/tests/test_function.py::test_should_evalf", "sympy/matrices/tests/test_matrices.py::test_args", "sympy/matrices/tests/test_matrices.py::test_division", "sympy/matrices/tests/test_matrices.py::test_sum", "sympy/matrices/tests/test_matrices.py::test_addition", "sympy/matrices/tests/test_matrices.py::test_fancy_index_matrix", "sympy/matrices/tests/test_matrices.py::test_multiplication", "sympy/matrices/tests/test_matrices.py::test_power", "sympy/matrices/tests/test_matrices.py::test_creation", "sympy/matrices/tests/test_matrices.py::test_tolist", "sympy/matrices/tests/test_matrices.py::test_as_mutable", 
"sympy/matrices/tests/test_matrices.py::test_determinant", "sympy/matrices/tests/test_matrices.py::test_det_LU_decomposition", "sympy/matrices/tests/test_matrices.py::test_berkowitz_minors", "sympy/matrices/tests/test_matrices.py::test_slicing", "sympy/matrices/tests/test_matrices.py::test_submatrix_assignment", "sympy/matrices/tests/test_matrices.py::test_extract", "sympy/matrices/tests/test_matrices.py::test_reshape", "sympy/matrices/tests/test_matrices.py::test_applyfunc", "sympy/matrices/tests/test_matrices.py::test_expand", "sympy/matrices/tests/test_matrices.py::test_random", "sympy/matrices/tests/test_matrices.py::test_LUdecomp", "sympy/matrices/tests/test_matrices.py::test_LUsolve", "sympy/matrices/tests/test_matrices.py::test_QRsolve", "sympy/matrices/tests/test_matrices.py::test_inverse", "sympy/matrices/tests/test_matrices.py::test_matrix_inverse_mod", "sympy/matrices/tests/test_matrices.py::test_util", "sympy/matrices/tests/test_matrices.py::test_jacobian_hessian", "sympy/matrices/tests/test_matrices.py::test_QR", "sympy/matrices/tests/test_matrices.py::test_QR_non_square", "sympy/matrices/tests/test_matrices.py::test_nullspace", "sympy/matrices/tests/test_matrices.py::test_columnspace", "sympy/matrices/tests/test_matrices.py::test_wronskian", "sympy/matrices/tests/test_matrices.py::test_eigen", "sympy/matrices/tests/test_matrices.py::test_subs", "sympy/matrices/tests/test_matrices.py::test_simplify", "sympy/matrices/tests/test_matrices.py::test_transpose", "sympy/matrices/tests/test_matrices.py::test_conjugate", "sympy/matrices/tests/test_matrices.py::test_conj_dirac", "sympy/matrices/tests/test_matrices.py::test_trace", "sympy/matrices/tests/test_matrices.py::test_shape", "sympy/matrices/tests/test_matrices.py::test_col_row_op", "sympy/matrices/tests/test_matrices.py::test_zip_row_op", "sympy/matrices/tests/test_matrices.py::test_issue_3950", "sympy/matrices/tests/test_matrices.py::test_issue_3981", "sympy/matrices/tests/test_matrices.py::test_evalf", 
"sympy/matrices/tests/test_matrices.py::test_is_symbolic", "sympy/matrices/tests/test_matrices.py::test_is_upper", "sympy/matrices/tests/test_matrices.py::test_is_lower", "sympy/matrices/tests/test_matrices.py::test_is_nilpotent", "sympy/matrices/tests/test_matrices.py::test_zeros_ones_fill", "sympy/matrices/tests/test_matrices.py::test_empty_zeros", "sympy/matrices/tests/test_matrices.py::test_issue_3749", "sympy/matrices/tests/test_matrices.py::test_inv_iszerofunc", "sympy/matrices/tests/test_matrices.py::test_jacobian_metrics", "sympy/matrices/tests/test_matrices.py::test_jacobian2", "sympy/matrices/tests/test_matrices.py::test_issue_4564", "sympy/matrices/tests/test_matrices.py::test_nonvectorJacobian", "sympy/matrices/tests/test_matrices.py::test_vec", "sympy/matrices/tests/test_matrices.py::test_vech", "sympy/matrices/tests/test_matrices.py::test_vech_errors", "sympy/matrices/tests/test_matrices.py::test_diag", "sympy/matrices/tests/test_matrices.py::test_get_diag_blocks1", "sympy/matrices/tests/test_matrices.py::test_get_diag_blocks2", "sympy/matrices/tests/test_matrices.py::test_inv_block", "sympy/matrices/tests/test_matrices.py::test_creation_args", "sympy/matrices/tests/test_matrices.py::test_diagonal_symmetrical", "sympy/matrices/tests/test_matrices.py::test_diagonalization", "sympy/matrices/tests/test_matrices.py::test_jordan_form", "sympy/matrices/tests/test_matrices.py::test_jordan_form_complex_issue_9274", "sympy/matrices/tests/test_matrices.py::test_Matrix_berkowitz_charpoly", "sympy/matrices/tests/test_matrices.py::test_exp", "sympy/matrices/tests/test_matrices.py::test_has", "sympy/matrices/tests/test_matrices.py::test_errors", "sympy/matrices/tests/test_matrices.py::test_len", "sympy/matrices/tests/test_matrices.py::test_integrate", "sympy/matrices/tests/test_matrices.py::test_limit", "sympy/matrices/tests/test_matrices.py::test_diff", "sympy/matrices/tests/test_matrices.py::test_getattr", "sympy/matrices/tests/test_matrices.py::test_hessenberg", 
"sympy/matrices/tests/test_matrices.py::test_cholesky", "sympy/matrices/tests/test_matrices.py::test_LDLdecomposition", "sympy/matrices/tests/test_matrices.py::test_cholesky_solve", "sympy/matrices/tests/test_matrices.py::test_LDLsolve", "sympy/matrices/tests/test_matrices.py::test_lower_triangular_solve", "sympy/matrices/tests/test_matrices.py::test_upper_triangular_solve", "sympy/matrices/tests/test_matrices.py::test_diagonal_solve", "sympy/matrices/tests/test_matrices.py::test_matrix_norm", "sympy/matrices/tests/test_matrices.py::test_singular_values", "sympy/matrices/tests/test_matrices.py::test_condition_number", "sympy/matrices/tests/test_matrices.py::test_equality", "sympy/matrices/tests/test_matrices.py::test_col_join", "sympy/matrices/tests/test_matrices.py::test_row_insert", "sympy/matrices/tests/test_matrices.py::test_col_insert", "sympy/matrices/tests/test_matrices.py::test_normalized", "sympy/matrices/tests/test_matrices.py::test_print_nonzero", "sympy/matrices/tests/test_matrices.py::test_zeros_eye", "sympy/matrices/tests/test_matrices.py::test_is_zero", "sympy/matrices/tests/test_matrices.py::test_rotation_matrices", "sympy/matrices/tests/test_matrices.py::test_DeferredVector", "sympy/matrices/tests/test_matrices.py::test_DeferredVector_not_iterable", "sympy/matrices/tests/test_matrices.py::test_DeferredVector_Matrix", "sympy/matrices/tests/test_matrices.py::test_GramSchmidt", "sympy/matrices/tests/test_matrices.py::test_casoratian", "sympy/matrices/tests/test_matrices.py::test_zero_dimension_multiply", "sympy/matrices/tests/test_matrices.py::test_slice_issue_2884", "sympy/matrices/tests/test_matrices.py::test_slice_issue_3401", "sympy/matrices/tests/test_matrices.py::test_copyin", "sympy/matrices/tests/test_matrices.py::test_invertible_check", "sympy/matrices/tests/test_matrices.py::test_issue_5964", "sympy/matrices/tests/test_matrices.py::test_issue_7604", "sympy/matrices/tests/test_matrices.py::test_is_Identity", 
"sympy/matrices/tests/test_matrices.py::test_dot", "sympy/matrices/tests/test_matrices.py::test_dual", "sympy/matrices/tests/test_matrices.py::test_anti_symmetric", "sympy/matrices/tests/test_matrices.py::test_normalize_sort_diogonalization", "sympy/matrices/tests/test_matrices.py::test_issue_5321", "sympy/matrices/tests/test_matrices.py::test_issue_5320", "sympy/matrices/tests/test_matrices.py::test_cross", "sympy/matrices/tests/test_matrices.py::test_hash", "sympy/matrices/tests/test_matrices.py::test_adjoint", "sympy/matrices/tests/test_matrices.py::test_simplify_immutable", "sympy/matrices/tests/test_matrices.py::test_rank", "sympy/matrices/tests/test_matrices.py::test_replace", "sympy/matrices/tests/test_matrices.py::test_replace_map", "sympy/matrices/tests/test_matrices.py::test_atoms", "sympy/matrices/tests/test_matrices.py::test_pinv", "sympy/matrices/tests/test_matrices.py::test_pinv_solve", "sympy/matrices/tests/test_matrices.py::test_gauss_jordan_solve", "sympy/matrices/tests/test_matrices.py::test_issue_7201", "sympy/matrices/tests/test_matrices.py::test_free_symbols", "sympy/matrices/tests/test_matrices.py::test_hermitian", "sympy/matrices/tests/test_matrices.py::test_doit", "sympy/matrices/tests/test_matrices.py::test_issue_9457_9467_9876", "sympy/matrices/tests/test_matrices.py::test_issue_9422", "sympy/sets/tests/test_sets.py::test_interval_arguments", "sympy/sets/tests/test_sets.py::test_interval_symbolic_end_points", "sympy/sets/tests/test_sets.py::test_union", "sympy/sets/tests/test_sets.py::test_difference", "sympy/sets/tests/test_sets.py::test_Complement", "sympy/sets/tests/test_sets.py::test_complement", "sympy/sets/tests/test_sets.py::test_intersect", "sympy/sets/tests/test_sets.py::test_intersection", "sympy/sets/tests/test_sets.py::test_issue_9623", "sympy/sets/tests/test_sets.py::test_is_disjoint", "sympy/sets/tests/test_sets.py::test_ProductSet_of_single_arg_is_arg", "sympy/sets/tests/test_sets.py::test_interval_subs", 
"sympy/sets/tests/test_sets.py::test_interval_to_mpi", "sympy/sets/tests/test_sets.py::test_measure", "sympy/sets/tests/test_sets.py::test_is_subset", "sympy/sets/tests/test_sets.py::test_is_proper_subset", "sympy/sets/tests/test_sets.py::test_is_superset", "sympy/sets/tests/test_sets.py::test_is_proper_superset", "sympy/sets/tests/test_sets.py::test_contains", "sympy/sets/tests/test_sets.py::test_interval_symbolic", "sympy/sets/tests/test_sets.py::test_union_contains", "sympy/sets/tests/test_sets.py::test_is_number", "sympy/sets/tests/test_sets.py::test_Interval_is_left_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_is_right_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_as_relational", "sympy/sets/tests/test_sets.py::test_Finite_as_relational", "sympy/sets/tests/test_sets.py::test_Union_as_relational", "sympy/sets/tests/test_sets.py::test_Intersection_as_relational", "sympy/sets/tests/test_sets.py::test_EmptySet", "sympy/sets/tests/test_sets.py::test_finite_basic", "sympy/sets/tests/test_sets.py::test_powerset", "sympy/sets/tests/test_sets.py::test_product_basic", "sympy/sets/tests/test_sets.py::test_real", "sympy/sets/tests/test_sets.py::test_supinf", "sympy/sets/tests/test_sets.py::test_universalset", "sympy/sets/tests/test_sets.py::test_Union_of_ProductSets_shares", "sympy/sets/tests/test_sets.py::test_Interval_free_symbols", "sympy/sets/tests/test_sets.py::test_image_interval", "sympy/sets/tests/test_sets.py::test_image_piecewise", "sympy/sets/tests/test_sets.py::test_image_FiniteSet", "sympy/sets/tests/test_sets.py::test_image_Union", "sympy/sets/tests/test_sets.py::test_image_EmptySet", "sympy/sets/tests/test_sets.py::test_issue_5724_7680", "sympy/sets/tests/test_sets.py::test_boundary", "sympy/sets/tests/test_sets.py::test_boundary_Union", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet_line", "sympy/sets/tests/test_sets.py::test_is_open", 
"sympy/sets/tests/test_sets.py::test_is_closed", "sympy/sets/tests/test_sets.py::test_closure", "sympy/sets/tests/test_sets.py::test_interior", "sympy/sets/tests/test_sets.py::test_issue_7841", "sympy/sets/tests/test_sets.py::test_Eq", "sympy/sets/tests/test_sets.py::test_SymmetricDifference", "sympy/sets/tests/test_sets.py::test_issue_9536", "sympy/sets/tests/test_sets.py::test_issue_9637", "sympy/sets/tests/test_sets.py::test_issue_9808", "sympy/sets/tests/test_sets.py::test_issue_9956", "sympy/sets/tests/test_sets.py::test_issue_Symbol_inter", "sympy/sets/tests/test_sets.py::test_issue_10113" ]
[]
BSD
337
pypa__twine-156
5db1018ada0ba4d98201c299c84858e98adb87e3
2015-12-16 23:36:55
f487b7da9c42e4932bc33bf10d70cdc59fd16fd5
diff --git a/README.rst b/README.rst index ab4baa4..9b8f0bb 100644 --- a/README.rst +++ b/README.rst @@ -54,15 +54,15 @@ Usage 1. Create some distributions in the normal way: - .. code-block:: bash +.. code-block:: bash - $ python setup.py sdist bdist_wheel + $ python setup.py sdist bdist_wheel 2. Upload with twine: - .. code-block:: bash +.. code-block:: bash - $ twine upload dist/* + $ twine upload dist/* 3. Done! @@ -73,10 +73,8 @@ Options .. code-block:: bash $ twine upload -h - - usage: twine upload [-h] [-r REPOSITORY] [-s] [--sign-with SIGN_WITH] - [-i IDENTITY] [-u USERNAME] [-p PASSWORD] [-c COMMENT] - [--config-file CONFIG_FILE] [--skip-existing] + usage: twine upload [-h] [-r REPOSITORY] [-s] [-i IDENTITY] [-u USERNAME] + [-p PASSWORD] [-c COMMENT] dist [dist ...] positional arguments: @@ -87,10 +85,8 @@ Options optional arguments: -h, --help show this help message and exit -r REPOSITORY, --repository REPOSITORY - The repository to upload the files to (default: pypi) + The repository to upload the files to -s, --sign Sign files to upload using gpg - --sign-with SIGN_WITH - GPG program used to sign uploads (default: gpg) -i IDENTITY, --identity IDENTITY GPG identity used to sign files -u USERNAME, --username USERNAME @@ -99,9 +95,8 @@ Options The password to authenticate to the repository with -c COMMENT, --comment COMMENT The comment to include with the distribution file - --config-file CONFIG_FILE + --config-file FILE The .pypirc config file to use - --skip-existing Continue uploading files if one already exists Resources diff --git a/docs/changelog.rst b/docs/changelog.rst index 9ad96d0..d16cada 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -4,12 +4,10 @@ Changelog ========= -* :release:`1.7.0 <TBD>` +* :release:`1.6.5 <2015-12-16>` - * :feature:`142` Support ``--cert`` and ``--client-cert`` command-line flags - and config file options for feature parity with pip. 
This allows users to - verify connections to servers other than PyPI (e.g., local package - repositories) with different certificates. + * :bug:`155` Bump requests-toolbelt version to ensure we avoid + ConnectionErrors * :release:`1.6.4 <2015-10-27>` diff --git a/setup.py b/setup.py index 6053a66..2af1dcc 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ import twine install_requires = [ "pkginfo >= 1.0", "requests >= 2.3.0", - "requests-toolbelt >= 0.4.0", + "requests-toolbelt >= 0.5.1", "setuptools >= 0.7.0", ] diff --git a/twine/__init__.py b/twine/__init__.py index e8f83e4..2549002 100644 --- a/twine/__init__.py +++ b/twine/__init__.py @@ -23,7 +23,7 @@ __title__ = "twine" __summary__ = "Collection of utilities for interacting with PyPI" __uri__ = "https://github.com/pypa/twine" -__version__ = "1.6.4" +__version__ = "1.6.5" __author__ = "Donald Stufft and individual contributors" __email__ = "[email protected]" diff --git a/twine/commands/register.py b/twine/commands/register.py index 16eece0..6be8cc8 100644 --- a/twine/commands/register.py +++ b/twine/commands/register.py @@ -23,8 +23,7 @@ from twine.repository import Repository from twine import utils -def register(package, repository, username, password, comment, config_file, - cert, client_cert): +def register(package, repository, username, password, comment, config_file): config = utils.get_repository_from_config(config_file, repository) config["repository"] = utils.normalize_repository_url( config["repository"] @@ -34,12 +33,8 @@ def register(package, repository, username, password, comment, config_file, username = utils.get_username(username, config) password = utils.get_password(password, config) - ca_cert = utils.get_cacert(cert, config) - client_cert = utils.get_clientcert(client_cert, config) repository = Repository(config["repository"], username, password) - repository.set_certificate_authority(ca_cert) - repository.set_client_certificate(client_cert) if not os.path.exists(package): raise 
exc.PackageNotFound( @@ -83,17 +78,6 @@ def main(args): default="~/.pypirc", help="The .pypirc config file to use", ) - parser.add_argument( - "--cert", - metavar="path", - help="Path to alternate CA bundle", - ) - parser.add_argument( - "--client-cert", - metavar="path", - help="Path to SSL client certificate, a single file containing the " - "private key and the certificate in PEM forma", - ) parser.add_argument( "package", metavar="package", diff --git a/twine/commands/upload.py b/twine/commands/upload.py index f194d8a..2bd4a52 100644 --- a/twine/commands/upload.py +++ b/twine/commands/upload.py @@ -62,7 +62,7 @@ def skip_upload(response, skip_existing, package): def upload(dists, repository, sign, identity, username, password, comment, - sign_with, config_file, skip_existing, cert, client_cert): + sign_with, config_file, skip_existing): # Check that a nonsensical option wasn't given if not sign and identity: raise ValueError("sign must be given along with identity") @@ -85,12 +85,8 @@ def upload(dists, repository, sign, identity, username, password, comment, username = utils.get_username(username, config) password = utils.get_password(password, config) - ca_cert = utils.get_cacert(cert, config) - client_cert = utils.get_clientcert(client_cert, config) repository = Repository(config["repository"], username, password) - repository.set_certificate_authority(ca_cert) - repository.set_client_certificate(client_cert) for filename in uploads: package = PackageFile.from_filename(filename, comment) @@ -171,17 +167,6 @@ def main(args): action="store_true", help="Continue uploading files if one already exists", ) - parser.add_argument( - "--cert", - metavar="path", - help="Path to alternate CA bundle", - ) - parser.add_argument( - "--client-cert", - metavar="path", - help="Path to SSL client certificate, a single file containing the " - "private key and the certificate in PEM forma", - ) parser.add_argument( "dists", nargs="+", diff --git a/twine/repository.py 
b/twine/repository.py index ac441f5..ae57821 100644 --- a/twine/repository.py +++ b/twine/repository.py @@ -41,14 +41,6 @@ class Repository(object): data_to_send.append((key, item)) return data_to_send - def set_certificate_authority(self, cacert): - if cacert: - self.session.verify = cacert - - def set_client_certificate(self, clientcert): - if clientcert: - self.session.cert = clientcert - def register(self, package): data = package.metadata_dictionary() data.update({ diff --git a/twine/utils.py b/twine/utils.py index d8771ce..db49d14 100644 --- a/twine/utils.py +++ b/twine/utils.py @@ -116,15 +116,14 @@ def normalize_repository_url(url): return urlunparse(parsed) -def get_userpass_value(cli_value, config, key, prompt_strategy=None): +def get_userpass_value(cli_value, config, key, prompt_strategy): """Gets the username / password from config. Uses the following rules: 1. If it is specified on the cli (`cli_value`), use that. 2. If `config[key]` is specified, use that. - 3. If `prompt_strategy`, prompt using `prompt_strategy`. - 4. Otherwise return None + 3. Otherwise prompt using `prompt_strategy`. :param cli_value: The value supplied from the command line or `None`. :type cli_value: unicode or `None` @@ -141,10 +140,8 @@ def get_userpass_value(cli_value, config, key, prompt_strategy=None): return cli_value elif config.get(key): return config[key] - elif prompt_strategy: - return prompt_strategy() else: - return None + return prompt_strategy() def password_prompt(prompt_text): # Always expects unicode for our own sanity @@ -164,11 +161,3 @@ get_password = functools.partial( key='password', prompt_strategy=password_prompt('Enter your password: '), ) -get_cacert = functools.partial( - get_userpass_value, - key='ca_cert', -) -get_clientcert = functools.partial( - get_userpass_value, - key='client_cert', -)
Requests 2.9.0 and requests-toolbelt < 0.5.1 break uploading We need to bump the lower limit on our dependency of requests-toolbelt. See also: https://github.com/sigmavirus24/requests-toolbelt/issues/117
pypa/twine
diff --git a/tests/test_repository.py b/tests/test_repository.py index 684f403..3b8d84b 100644 --- a/tests/test_repository.py +++ b/tests/test_repository.py @@ -47,29 +47,3 @@ def test_iterables_are_flattened(): tuples = repository.Repository._convert_data_to_list_of_tuples(data) assert tuples == [('platform', 'UNKNOWN'), ('platform', 'ANOTHERPLATFORM')] - - -def test_set_client_certificate(): - repo = repository.Repository( - repository_url='https://pypi.python.org/pypi', - username='username', - password='password', - ) - - assert repo.session.cert is None - - repo.set_client_certificate(('/path/to/cert', '/path/to/key')) - assert repo.session.cert == ('/path/to/cert', '/path/to/key') - - -def test_set_certificate_authority(): - repo = repository.Repository( - repository_url='https://pypi.python.org/pypi', - username='username', - password='password', - ) - - assert repo.session.verify is True - - repo.set_certificate_authority('/path/to/cert') - assert repo.session.verify == '/path/to/cert' diff --git a/tests/test_upload.py b/tests/test_upload.py index 596de80..7f99510 100644 --- a/tests/test_upload.py +++ b/tests/test_upload.py @@ -78,7 +78,6 @@ def test_get_config_old_format(tmpdir): try: upload.upload(dists=dists, repository="pypi", sign=None, identity=None, username=None, password=None, comment=None, - cert=None, client_cert=None, sign_with=None, config_file=pypirc, skip_existing=False) except KeyError as err: assert err.args[0] == (
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 8 }
1.6
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "coverage", "pretend", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work flake8==7.2.0 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mccabe==0.7.0 packaging @ file:///croot/packaging_1734472117206/work pkginfo==1.12.1.2 pluggy @ file:///croot/pluggy_1733169602837/work pretend==1.0.9 pycodestyle==2.13.0 pyflakes==3.3.1 pytest @ file:///croot/pytest_1738938843180/work requests==2.32.3 requests-toolbelt==1.0.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/pypa/twine.git@5db1018ada0ba4d98201c299c84858e98adb87e3#egg=twine urllib3==2.3.0
name: twine channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - flake8==7.2.0 - idna==3.10 - mccabe==0.7.0 - pkginfo==1.12.1.2 - pretend==1.0.9 - pycodestyle==2.13.0 - pyflakes==3.3.1 - requests==2.32.3 - requests-toolbelt==1.0.0 - urllib3==2.3.0 prefix: /opt/conda/envs/twine
[ "tests/test_upload.py::test_get_config_old_format" ]
[]
[ "tests/test_repository.py::test_gpg_signature_structure_is_preserved", "tests/test_repository.py::test_content_structure_is_preserved", "tests/test_repository.py::test_iterables_are_flattened", "tests/test_upload.py::test_ensure_wheel_files_uploaded_first", "tests/test_upload.py::test_ensure_if_no_wheel_files", "tests/test_upload.py::test_find_dists_expands_globs", "tests/test_upload.py::test_find_dists_errors_on_invalid_globs", "tests/test_upload.py::test_find_dists_handles_real_files", "tests/test_upload.py::test_skip_existing_skips_files_already_on_PyPI", "tests/test_upload.py::test_skip_upload_respects_skip_existing" ]
[]
Apache License 2.0
338
Shopify__shopify_python_api-130
ecd532cc904729fd366f05ae8d7d754df79b55f7
2015-12-17 02:25:44
c29e0ecbed9de67dd923f980a3ac053922dab75e
diff --git a/shopify/resources/__init__.py b/shopify/resources/__init__.py index adacc08..da17196 100644 --- a/shopify/resources/__init__.py +++ b/shopify/resources/__init__.py @@ -47,5 +47,6 @@ from .smart_collection import SmartCollection from .gift_card import GiftCard from .discount import Discount from .shipping_zone import ShippingZone +from .location import Location from ..base import ShopifyResource diff --git a/shopify/resources/location.py b/shopify/resources/location.py new file mode 100644 index 0000000..671b5b0 --- /dev/null +++ b/shopify/resources/location.py @@ -0,0 +1,5 @@ +from ..base import ShopifyResource + + +class Location(ShopifyResource): + pass
How do I retrieve Location information? I see that dir(shopify) doesn't show me Location as an attribute, but I'm wondering if there's another way to use the API to get it (even though perhaps it's not wrapped as nicely as these other resources): https://docs.shopify.com/api/location dir(shopify) ['Address', 'ApplicationCharge', 'Article', 'Asset', 'BillingAddress', 'Blog', 'CarrierService', 'Cart', 'Checkout', 'Collect', 'Comment', 'Country', 'CustomCollection', 'Customer', 'CustomerGroup', 'CustomerSavedSearch', 'Event', 'Fulfillment', 'FulfillmentService', 'GiftCard', 'Image', 'LineItem', 'Metafield', 'NoteAttribute', 'Option', 'Order', 'OrderRisk', 'Page', 'PaymentDetails', 'Policy', 'Product', 'ProductSearchEngine', 'Province', 'Receipt', 'RecurringApplicationCharge', 'Redirect', 'Rule', 'ScriptTag', 'Session', 'ShippingAddress', 'ShippingLine', 'Shop', 'ShopifyResource', 'SmartCollection', 'TaxLine', 'Theme', 'Transaction', 'VERSION', 'ValidationException', 'Variant', 'Webhook', '__builtins__', '__doc__', '__file__', '__name__', '__package__', '__path__', 'address', 'application_charge', 'article', 'asset', 'base', 'billing_address', 'blog', 'carrier_service', 'cart', 'checkout', 'collect', 'comment', 'country', 'custom_collection', 'customer', 'customer_group', 'customer_saved_search', 'event', 'fulfillment', 'fulfillment_service', 'gift_card', 'image', 'line_item', 'metafield', 'mixins', 'note_attribute', 'option', 'order', 'order_risk', 'page', 'payment_details', 'policy', 'product', 'product_search_engine', 'province', 'receipt', 'recurring_application_charge', 'redirect', 'resources', 'rule', 'script_tag', 'session', 'shipping_address', 'shipping_line', 'shop', 'smart_collection', 'tax_line', 'theme', 'transaction', 'variant', 'version', 'webhook', 'yamlobjects'] Can I use this module to get location information? Thanks!
Shopify/shopify_python_api
diff --git a/test/fixtures/location.json b/test/fixtures/location.json new file mode 100644 index 0000000..ae07fac --- /dev/null +++ b/test/fixtures/location.json @@ -0,0 +1,19 @@ +{ + "location": { + "id": 487838322, + "name": "Fifth Avenue AppleStore", + "deleted_at": null, + "address1": null, + "address2": null, + "city": null, + "zip": null, + "province": null, + "country": "US", + "phone": null, + "created_at": "2015-12-08T11:44:58-05:00", + "updated_at": "2015-12-08T11:44:58-05:00", + "country_code": "US", + "country_name": "United States", + "province_code": null + } +} diff --git a/test/fixtures/locations.json b/test/fixtures/locations.json new file mode 100644 index 0000000..906f7b7 --- /dev/null +++ b/test/fixtures/locations.json @@ -0,0 +1,38 @@ +{ + "locations": [ + { + "id": 487838322, + "name": "Fifth Avenue AppleStore", + "deleted_at": null, + "address1": null, + "address2": null, + "city": null, + "zip": null, + "province": null, + "country": "US", + "phone": null, + "created_at": "2015-12-08T11:44:58-05:00", + "updated_at": "2015-12-08T11:44:58-05:00", + "country_code": "US", + "country_name": "United States", + "province_code": null + }, + { + "id": 1034478814, + "name": "Berlin Store", + "deleted_at": null, + "address1": null, + "address2": null, + "city": null, + "zip": null, + "province": null, + "country": "DE", + "phone": null, + "created_at": "2015-12-08T11:44:58-05:00", + "updated_at": "2015-12-08T11:44:58-05:00", + "country_code": "DE", + "country_name": "Germany", + "province_code": null + } + ] +} diff --git a/test/locations_test.py b/test/locations_test.py new file mode 100644 index 0000000..44a6768 --- /dev/null +++ b/test/locations_test.py @@ -0,0 +1,14 @@ +import shopify +from test.test_helper import TestCase + +class LocationsTest(TestCase): + def test_fetch_locations(self): + self.fake("locations", method='GET', body=self.load_fixture('locations')) + locations = shopify.Location.find() + self.assertEqual(2,len(locations)) + + def 
test_fetch_location(self): + self.fake("locations/487838322", method='GET', body=self.load_fixture('location')) + location = shopify.Location.find(487838322) + self.assertEqual(location.id,487838322) + self.assertEqual(location.name,"Fifth Avenue AppleStore")
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
2.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pyactiveresource==2.2.2 pytest==8.3.5 PyYAML==6.0.2 -e git+https://github.com/Shopify/shopify_python_api.git@ecd532cc904729fd366f05ae8d7d754df79b55f7#egg=ShopifyAPI six==1.17.0 tomli==2.2.1
name: shopify_python_api channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pyactiveresource==2.2.2 - pytest==8.3.5 - pyyaml==6.0.2 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/shopify_python_api
[ "test/locations_test.py::LocationsTest::test_fetch_location", "test/locations_test.py::LocationsTest::test_fetch_locations" ]
[]
[]
[]
MIT License
339
jupyter-incubator__sparkmagic-73
d9662c5c5b089976810dfe863437d86ae72ccf82
2015-12-18 01:53:03
d9662c5c5b089976810dfe863437d86ae72ccf82
diff --git a/remotespark/RemoteSparkMagics.py b/remotespark/RemoteSparkMagics.py index 361b91b..5271de0 100644 --- a/remotespark/RemoteSparkMagics.py +++ b/remotespark/RemoteSparkMagics.py @@ -55,30 +55,40 @@ class RemoteSparkMagics(Magics): Constants.context_name_sql, Constants.context_name_hive, Constants.context_name_spark)) - @argument("-e", "--endpoint", help="The name of the Livy endpoint to use. " - "If only one endpoint has been created, there's no need to specify one.") + @argument("-s", "--session", help="The name of the Livy session to use. " + "If only one session has been created, there's no need to specify one.") @argument("-t", "--chart", type=str, default="area", help='Chart type to use: table, area, line, bar.') @argument("command", type=str, default=[""], nargs="*", help="Commands to execute.") @line_cell_magic def spark(self, line, cell=""): - """Magic to execute spark remotely. - If invoked with no subcommand, the code will be executed against the specified endpoint. + """Magic to execute spark remotely. + + This magic allows you to create a Livy Scala or Python session against a Livy endpoint. Every session can + be used to execute either Spark code or SparkSQL code by executing against the SQL context in the session. + When the SQL context is used, the result will be a Pandas dataframe of a sample of the results. + + If invoked with no subcommand, the cell will be executed against the specified session. Subcommands ----------- info - Display the mode and available Livy endpoints. + Display the mode and available Livy sessions. add - Add a Livy endpoint. First argument is the friendly name of the endpoint, second argument - is the language, and third argument is the connection string. A fourth argument specifying if - endpoint can be skipped if already present is optional: "skip" or empty. + Add a Livy session. 
First argument is the name of the session, second argument + is the language, and third argument is the connection string of the Livy endpoint. + A fourth argument specifying if session creation can be skipped if it already exists is optional: + "skip" or empty. e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword skip` or e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword` + run + Run Spark code against a session. + e.g. `%%spark -e testsession` will execute the cell code against the testsession previously created + e.g. `%%spark -e testsession -c sql` will execute the SQL code against the testsession previously created delete - Delete a Livy endpoint. Argument is the friendly name of the endpoint to be deleted. + Delete a Livy session. Argument is the name of the session to be deleted. e.g. `%%spark delete defaultlivy` cleanup - Delete all Livy endpoints. No arguments required. + Delete all Livy sessions created by the notebook. No arguments required. e.g. `%%spark cleanup` """ usage = "Please look at usage of %spark by executing `%spark?`." @@ -102,13 +112,13 @@ class RemoteSparkMagics(Magics): skip = args.command[4].lower() == "skip" else: skip = False - self.spark_controller.add_endpoint(name, language, connection_string, skip) + self.spark_controller.add_session(name, language, connection_string, skip) # delete elif subcommand == "delete": if len(args.command) != 2: raise ValueError("Subcommand 'delete' requires an argument. 
{}".format(usage)) name = args.command[1].lower() - self.spark_controller.delete_endpoint(name) + self.spark_controller.delete_session(name) # cleanup elif subcommand == "cleanup": self.spark_controller.cleanup() @@ -116,7 +126,7 @@ class RemoteSparkMagics(Magics): elif len(subcommand) == 0: if args.context == Constants.context_name_spark: (success, out) = self.spark_controller.run_cell(cell, - args.endpoint) + args.session) if success: self.ipython.write(out) else: @@ -124,13 +134,13 @@ class RemoteSparkMagics(Magics): elif args.context == Constants.context_name_sql: try: return self.spark_controller.run_cell_sql(cell, - args.endpoint) + args.session) except DataFrameParseException as e: self.ipython.write_err(e.out) elif args.context == Constants.context_name_hive: try: return self.spark_controller.run_cell_hive(cell, - args.endpoint) + args.session) except DataFrameParseException as e: self.ipython.write_err(e.out) else: diff --git a/remotespark/livyclientlib/clientmanager.py b/remotespark/livyclientlib/clientmanager.py index d6466ae..e253735 100644 --- a/remotespark/livyclientlib/clientmanager.py +++ b/remotespark/livyclientlib/clientmanager.py @@ -41,12 +41,12 @@ class ClientManager(object): def _serialize_state(self): self._serializer.serialize_state(self._livy_clients) - def get_endpoints_list(self): + def get_sessions_list(self): return list(self._livy_clients.keys()) def add_client(self, name, livy_client): - if name in self.get_endpoints_list(): - raise ValueError("Endpoint with name '{}' already exists. Please delete the endpoint" + if name in self.get_sessions_list(): + raise ValueError("Session with name '{}' already exists. 
Please delete the session" " first if you intend to replace it.".format(name)) self._livy_clients[name] = livy_client @@ -54,34 +54,34 @@ class ClientManager(object): def get_any_client(self): number_of_sessions = len(self._livy_clients) if number_of_sessions == 1: - key = self.get_endpoints_list()[0] + key = self.get_sessions_list()[0] return self._livy_clients[key] elif number_of_sessions == 0: raise AssertionError("You need to have at least 1 client created to execute commands.") else: - raise AssertionError("Please specify the client to use. Possible endpoints are {}".format( - self.get_endpoints_list())) + raise AssertionError("Please specify the client to use. Possible sessions are {}".format( + self.get_sessions_list())) def get_client(self, name): - if name in self.get_endpoints_list(): + if name in self.get_sessions_list(): return self._livy_clients[name] - raise ValueError("Could not find '{}' endpoint in list of saved endpoints. Possible endpoints are {}".format( - name, self.get_endpoints_list())) + raise ValueError("Could not find '{}' session in list of saved sessions. Possible sessions are {}".format( + name, self.get_sessions_list())) def delete_client(self, name): - self._remove_endpoint(name) + self._remove_session(name) def clean_up_all(self): - for name in self.get_endpoints_list(): - self._remove_endpoint(name) + for name in self.get_sessions_list(): + self._remove_session(name) if self._serializer is not None: self._serialize_state() - def _remove_endpoint(self, name): - if name in self.get_endpoints_list(): + def _remove_session(self, name): + if name in self.get_sessions_list(): self._livy_clients[name].close_session() del self._livy_clients[name] else: - raise ValueError("Could not find '{}' endpoint in list of saved endpoints. Possible endpoints are {}" - .format(name, self.get_endpoints_list())) + raise ValueError("Could not find '{}' session in list of saved sessions. 
Possible sessions are {}" + .format(name, self.get_sessions_list())) diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py index 87fb395..dedefbb 100644 --- a/remotespark/livyclientlib/livyclient.py +++ b/remotespark/livyclientlib/livyclient.py @@ -7,7 +7,7 @@ from .constants import Constants class LivyClient(object): - """Spark client for Livy endpoint""" + """Spark client for Livy session""" def __init__(self, session): self.logger = Log("LivyClient") diff --git a/remotespark/livyclientlib/livyclientfactory.py b/remotespark/livyclientlib/livyclientfactory.py index 85443b3..2b89045 100644 --- a/remotespark/livyclientlib/livyclientfactory.py +++ b/remotespark/livyclientlib/livyclientfactory.py @@ -11,7 +11,7 @@ from .linearretrypolicy import LinearRetryPolicy class LivyClientFactory(object): - """Spark client for Livy endpoint""" + """Spark client factory""" def __init__(self): self.logger = Log("LivyClientFactory") diff --git a/remotespark/livyclientlib/pandaslivyclientbase.py b/remotespark/livyclientlib/pandaslivyclientbase.py index 692c5d0..72852f1 100644 --- a/remotespark/livyclientlib/pandaslivyclientbase.py +++ b/remotespark/livyclientlib/pandaslivyclientbase.py @@ -5,7 +5,7 @@ from .livyclient import LivyClient from .dataframeparseexception import DataFrameParseException class PandasLivyClientBase(LivyClient): - """Spark client for Livy endpoint that produces pandas df for sql and hive commands.""" + """Spark client for Livy session that produces pandas df for sql and hive commands.""" def __init__(self, session, max_take_rows): super(PandasLivyClientBase, self).__init__(session) self.max_take_rows = max_take_rows diff --git a/remotespark/livyclientlib/pandaspysparklivyclient.py b/remotespark/livyclientlib/pandaspysparklivyclient.py index edf6abb..81c5123 100644 --- a/remotespark/livyclientlib/pandaspysparklivyclient.py +++ b/remotespark/livyclientlib/pandaspysparklivyclient.py @@ -7,7 +7,7 @@ import json from 
.pandaslivyclientbase import PandasLivyClientBase class PandasPysparkLivyClient(PandasLivyClientBase): - """Spark client for Livy endpoint in PySpark""" + """Spark client for Livy session in PySpark""" def __init__(self, session, max_take_rows): super(PandasPysparkLivyClient, self).__init__(session, max_take_rows) diff --git a/remotespark/livyclientlib/pandasscalalivyclient.py b/remotespark/livyclientlib/pandasscalalivyclient.py index 5b9e031..8bb1ea8 100644 --- a/remotespark/livyclientlib/pandasscalalivyclient.py +++ b/remotespark/livyclientlib/pandasscalalivyclient.py @@ -8,7 +8,7 @@ import re from .pandaslivyclientbase import PandasLivyClientBase class PandasScalaLivyClient(PandasLivyClientBase): - """Spark client for Livy endpoint in Scala""" + """Spark client for Livy session in Scala""" def __init__(self, session, max_take_rows): super(PandasScalaLivyClient, self).__init__(session, max_take_rows) diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py index 4ab4dd2..d736011 100644 --- a/remotespark/livyclientlib/sparkcontroller.py +++ b/remotespark/livyclientlib/sparkcontroller.py @@ -1,6 +1,3 @@ -"""Runs Scala, PySpark and SQL statement through Spark using a REST endpoint in remote cluster. -Provides the %spark magic.""" - # Copyright (c) 2015 [email protected] # Distributed under the terms of the Modified BSD License. 
@@ -38,12 +35,12 @@ class SparkController(object): def cleanup(self): self.client_manager.clean_up_all() - def delete_endpoint(self, name): + def delete_session(self, name): self.client_manager.delete_client(name) - def add_endpoint(self, name, language, connection_string, skip_if_exists): - if skip_if_exists and (name in self.client_manager.get_endpoints_list()): - self.logger.debug("Skipping {} because it already exists in list of endpoints.".format(name)) + def add_session(self, name, language, connection_string, skip_if_exists): + if skip_if_exists and (name in self.client_manager.get_sessions_list()): + self.logger.debug("Skipping {} because it already exists in list of sessions.".format(name)) return session = self.client_factory.create_session(language, connection_string, "-1", False) @@ -52,7 +49,7 @@ class SparkController(object): self.client_manager.add_client(name, livy_client) def get_client_keys(self): - return self.client_manager.get_endpoints_list() + return self.client_manager.get_sessions_list() def get_client_by_name_or_default(self, client_name): if client_name is None: diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py index b842b76..d8074f7 100644 --- a/remotespark/sparkkernelbase.py +++ b/remotespark/sparkkernelbase.py @@ -81,11 +81,11 @@ class SparkKernelBase(IPythonKernel): self.already_ran_once = True - add_endpoint_code = "%spark add {} {} {} skip".format( + add_session_code = "%spark add {} {} {} skip".format( self.client_name, self.session_language, connection_string) - self._execute_cell(add_endpoint_code, True, False, shutdown_if_error=True, + self._execute_cell(add_session_code, True, False, shutdown_if_error=True, log_if_error="Failed to create a Livy session.") - self.logger.debug("Added endpoint.") + self.logger.debug("Added session.") def _get_configuration(self): try:
Rename --endpoint param to magics to --session Make -e be -s
jupyter-incubator/sparkmagic
diff --git a/tests/test_clientmanager.py b/tests/test_clientmanager.py index 3376082..b1ef425 100644 --- a/tests/test_clientmanager.py +++ b/tests/test_clientmanager.py @@ -18,13 +18,13 @@ def test_deserialize_on_creation(): serializer.deserialize_state.return_value = [("py", None), ("sc", None)] manager = ClientManager(serializer) - assert "py" in manager.get_endpoints_list() - assert "sc" in manager.get_endpoints_list() + assert "py" in manager.get_sessions_list() + assert "sc" in manager.get_sessions_list() serializer = MagicMock() manager = ClientManager(serializer) - assert len(manager.get_endpoints_list()) == 0 + assert len(manager.get_sessions_list()) == 0 def test_serialize_periodically(): @@ -82,7 +82,7 @@ def test_client_names_returned(): manager.add_client("name0", client) manager.add_client("name1", client) - assert_equals({"name0", "name1"}, set(manager.get_endpoints_list())) + assert_equals({"name0", "name1"}, set(manager.get_sessions_list())) def test_get_any_client(): diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py index 363849f..22a70b4 100644 --- a/tests/test_remotesparkmagics.py +++ b/tests/test_remotesparkmagics.py @@ -32,10 +32,10 @@ def test_info_command_parses(): @with_setup(_setup, _teardown) -def test_add_endpoint_command_parses(): +def test_add_sessions_command_parses(): # Do not skip - add_endpoint_mock = MagicMock() - spark_controller.add_endpoint = add_endpoint_mock + add_sessions_mock = MagicMock() + spark_controller.add_session = add_sessions_mock command = "add" name = "name" language = "python" @@ -44,11 +44,11 @@ def test_add_endpoint_command_parses(): magic.spark(line) - add_endpoint_mock.assert_called_once_with(name, language, connection_string, False) + add_sessions_mock.assert_called_once_with(name, language, connection_string, False) # Skip - add_endpoint_mock = MagicMock() - spark_controller.add_endpoint = add_endpoint_mock + add_sessions_mock = MagicMock() + spark_controller.add_session = 
add_sessions_mock command = "add" name = "name" language = "python" @@ -57,13 +57,13 @@ def test_add_endpoint_command_parses(): magic.spark(line) - add_endpoint_mock.assert_called_once_with(name, language, connection_string, True) + add_sessions_mock.assert_called_once_with(name, language, connection_string, True) @with_setup(_setup, _teardown) -def test_delete_endpoint_command_parses(): +def test_delete_sessions_command_parses(): mock_method = MagicMock() - spark_controller.delete_endpoint = mock_method + spark_controller.delete_session = mock_method command = "delete" name = "name" line = " ".join([command, name]) @@ -98,8 +98,8 @@ def test_run_cell_command_parses(): run_cell_method.return_value = (True, "") spark_controller.run_cell = run_cell_method - command = "-e" - name = "endpoint_name" + command = "-s" + name = "sessions_name" line = " ".join([command, name]) cell = "cell code" diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py index 9dea184..4a20537 100644 --- a/tests/test_sparkcontroller.py +++ b/tests/test_sparkcontroller.py @@ -25,7 +25,7 @@ def _teardown(): @with_setup(_setup, _teardown) -def test_add_endpoint(): +def test_add_session(): name = "name" language = "python" connection_string = "url=http://location:port;username=name;password=word" @@ -34,7 +34,7 @@ def test_add_endpoint(): client_factory.create_session = MagicMock(return_value=session) client_factory.build_client = MagicMock(return_value=client) - controller.add_endpoint(name, language, connection_string, False) + controller.add_session(name, language, connection_string, False) client_factory.create_session.assert_called_once_with(language, connection_string, "-1", False) client_factory.build_client.assert_called_once_with(language, session) @@ -43,7 +43,7 @@ def test_add_endpoint(): @with_setup(_setup, _teardown) -def test_add_endpoint_skip(): +def test_add_session_skip(): name = "name" language = "python" connection_string = 
"url=http://location:port;username=name;password=word" @@ -52,8 +52,8 @@ def test_add_endpoint_skip(): client_factory.create_session = MagicMock(return_value=session) client_factory.build_client = MagicMock(return_value=client) - client_manager.get_endpoints_list.return_value = [name] - controller.add_endpoint(name, language, connection_string, True) + client_manager.get_sessions_list.return_value = [name] + controller.add_session(name, language, connection_string, True) assert client_factory.create_session.call_count == 0 assert client_factory.build_client.call_count == 0 @@ -62,10 +62,10 @@ def test_add_endpoint_skip(): @with_setup(_setup, _teardown) -def test_delete_endpoint(): +def test_delete_session(): name = "name" - controller.delete_endpoint(name) + controller.delete_session(name) client_manager.delete_client.assert_called_once_with(name) @@ -83,7 +83,7 @@ def test_run_cell(): default_client.execute = chosen_client.execute = MagicMock(return_value=(True,"")) client_manager.get_any_client = MagicMock(return_value=default_client) client_manager.get_client = MagicMock(return_value=chosen_client) - name = "endpoint_name" + name = "session_name" cell = "cell code" controller.run_cell(cell, name) @@ -107,4 +107,4 @@ def test_run_cell(): @with_setup(_setup, _teardown) def test_get_client_keys(): controller.get_client_keys() - client_manager.get_endpoints_list.assert_called_once_with() + client_manager.get_sessions_list.assert_called_once_with()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 9 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "mkdir ~/.sparkmagic", "cp remotespark/default_config.json ~/.sparkmagic/config.json" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 narwhals==1.32.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==6.0.1 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@d9662c5c5b089976810dfe863437d86ae72ccf82#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - narwhals==1.32.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==6.0.1 - pluggy==1.5.0 - 
prometheus-client==0.21.1 - ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_clientmanager.py::test_deserialize_on_creation", "tests/test_clientmanager.py::test_client_names_returned" ]
[ "tests/test_remotesparkmagics.py::test_info_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_parses", "tests/test_remotesparkmagics.py::test_delete_sessions_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_command_parses", "tests/test_remotesparkmagics.py::test_bad_command_throws_exception", "tests/test_remotesparkmagics.py::test_run_cell_command_parses", "tests/test_sparkcontroller.py::test_add_session", "tests/test_sparkcontroller.py::test_add_session_skip", "tests/test_sparkcontroller.py::test_delete_session", "tests/test_sparkcontroller.py::test_cleanup", "tests/test_sparkcontroller.py::test_run_cell", "tests/test_sparkcontroller.py::test_get_client_keys" ]
[ "tests/test_clientmanager.py::test_get_client_throws_when_client_not_exists", "tests/test_clientmanager.py::test_serialize_periodically", "tests/test_clientmanager.py::test_get_client", "tests/test_clientmanager.py::test_delete_client", "tests/test_clientmanager.py::test_delete_client_throws_when_client_not_exists", "tests/test_clientmanager.py::test_add_client_throws_when_client_exists", "tests/test_clientmanager.py::test_get_any_client", "tests/test_clientmanager.py::test_get_any_client_raises_exception_with_no_client", "tests/test_clientmanager.py::test_get_any_client_raises_exception_with_two_clients", "tests/test_clientmanager.py::test_clean_up", "tests/test_clientmanager.py::test_clean_up_serializer" ]
[]
Modified BSD License
340
joke2k__faker-314
9f338881f582807fd9d1339b6148b039f8141bb3
2015-12-18 19:49:49
883576c2d718ad7f604415e02a898f1f917d5b86
diff --git a/faker/providers/misc/__init__.py b/faker/providers/misc/__init__.py index 32531a60..9b318691 100644 --- a/faker/providers/misc/__init__.py +++ b/faker/providers/misc/__init__.py @@ -88,13 +88,33 @@ class Provider(BaseProvider): @param lower_case: Boolean. Whether to use lower letters @return: String. Random password """ - chars = "" + choices = "" + required_tokens = [] if special_chars: - chars += "!@#$%^&*()_+" + required_tokens.append(random.choice("!@#$%^&*()_+")) + choices += "!@#$%^&*()_+" if digits: - chars += string.digits + required_tokens.append(random.choice(string.digits)) + choices += string.digits if upper_case: - chars += string.ascii_uppercase + required_tokens.append(random.choice(string.ascii_uppercase)) + choices += string.ascii_uppercase if lower_case: - chars += string.ascii_lowercase - return ''.join(random.choice(chars) for x in range(length)) + required_tokens.append(random.choice(string.ascii_lowercase)) + choices += string.ascii_lowercase + + assert len(required_tokens) <= length, "Required length is shorter than required characters" + + # Generate a first version of the password + chars = [random.choice(choices) for x in range(length)] + + # Pick some unique locations + random_indexes = set() + while len(random_indexes) < len(required_tokens): + random_indexes.add(random.randint(0, len(chars) - 1)) + + # Replace them with the required characters + for i, index in enumerate(random_indexes): + chars[index] = required_tokens[i] + + return ''.join(chars)
Param switches on faker.password() don't guarantee valid password The format switches on `faker.password()` (`special_chars, digits, upper_case, lower_case`) don't always return passwords matching those rules. This is problematic as when using generated passwords in unit tests, where passwords must conform to validity rules (e.g. "must contain numbers"), tests can randomly fail. I expected that these switches would guarantee the function returns a conforming password. e.g. `faker.password(digits=True)` always returns a password containing digits, but this is not the case.
joke2k/faker
diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py index 4eeaa3c7..6502a448 100644 --- a/faker/tests/__init__.py +++ b/faker/tests/__init__.py @@ -9,6 +9,7 @@ import json import os import time import unittest +import string import sys try: @@ -458,6 +459,22 @@ class FactoryTestCase(unittest.TestCase): datetime.datetime.now(utc).replace(second=0, microsecond=0) ) + def test_password(self): + from faker.providers.misc import Provider + + def in_string(char, _str): + return char in _str + + for _ in range(999): + password = Provider.password() + + self.assertTrue(any([in_string(char, password) for char in "!@#$%^&*()_+"])) + self.assertTrue(any([in_string(char, password) for char in string.digits])) + self.assertTrue(any([in_string(char, password) for char in string.ascii_uppercase])) + self.assertTrue(any([in_string(char, password) for char in string.ascii_lowercase])) + + self.assertRaises(AssertionError, Provider.password, length=2) + def test_prefix_suffix_always_string(self): # Locales known to contain `*_male` and `*_female`. for locale in ("bg_BG", "dk_DK", "en", "ru_RU", "tr_TR"):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 1 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "faker/tests/requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
dnspython==2.7.0 email_validator==2.2.0 exceptiongroup==1.2.2 -e git+https://github.com/joke2k/faker.git@9f338881f582807fd9d1339b6148b039f8141bb3#egg=fake_factory idna==3.10 iniconfig==2.1.0 mock==1.0.1 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.2.1 UkPostcodeParser==1.0.3
name: faker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - dnspython==2.7.0 - email-validator==2.2.0 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - mock==1.0.1 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 - ukpostcodeparser==1.0.3 prefix: /opt/conda/envs/faker
[ "faker/tests/__init__.py::FactoryTestCase::test_password" ]
[]
[ "faker/tests/__init__.py::ShimsTestCase::test_counter", "faker/tests/__init__.py::UtilsTestCase::test_add_dicts", "faker/tests/__init__.py::UtilsTestCase::test_choice_distribution", "faker/tests/__init__.py::UtilsTestCase::test_find_available_locales", "faker/tests/__init__.py::UtilsTestCase::test_find_available_providers", "faker/tests/__init__.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider", "faker/tests/__init__.py::FactoryTestCase::test_command", "faker/tests/__init__.py::FactoryTestCase::test_command_custom_provider", "faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates", "faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates_with_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period", "faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period_with_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_datetime_safe", "faker/tests/__init__.py::FactoryTestCase::test_datetimes_with_and_without_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_documentor", "faker/tests/__init__.py::FactoryTestCase::test_format_calls_formatter_on_provider", "faker/tests/__init__.py::FactoryTestCase::test_format_transfers_arguments_to_formatter", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_callable", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_correct_formatter", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter", "faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format", "faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format_with_arguments", "faker/tests/__init__.py::FactoryTestCase::test_no_words_paragraph", "faker/tests/__init__.py::FactoryTestCase::test_no_words_sentence", "faker/tests/__init__.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces", 
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters", "faker/tests/__init__.py::FactoryTestCase::test_prefix_suffix_always_string", "faker/tests/__init__.py::FactoryTestCase::test_random_element", "faker/tests/__init__.py::FactoryTestCase::test_slugify", "faker/tests/__init__.py::FactoryTestCase::test_timezone_conversion", "faker/tests/__init__.py::FactoryTestCase::test_us_ssn_valid", "faker/tests/__init__.py::GeneratorTestCase::test_random_seed_doesnt_seed_system_random" ]
[]
MIT License
341
pika__pika-675
d8a782d97579cd96ed67ccfb55f63ca8fdafa199
2015-12-19 02:54:17
f73f9bbaddd90b03583a6693f6158e56fbede948
vitaly-krugl: CC @gmr: Hi Gavin, please review this fix. vitaly-krugl: @gmr, the failed test `BlockingChannel.basic_nack single message` is unrelated to the changes in this PR; it's a known issue described in #677. Please re-start the build. Thanks!
diff --git a/pika/adapters/libev_connection.py b/pika/adapters/libev_connection.py index 2a27055..26a6547 100644 --- a/pika/adapters/libev_connection.py +++ b/pika/adapters/libev_connection.py @@ -127,7 +127,7 @@ class LibevConnection(BaseConnection): if self._on_signal_callback and not global_sigterm_watcher: global_sigterm_watcher = \ self.ioloop.signal(signal.SIGTERM, - self._handle_sigterm) + self._handle_sigterm) if self._on_signal_callback and not global_sigint_watcher: global_sigint_watcher = self.ioloop.signal(signal.SIGINT, @@ -136,8 +136,8 @@ class LibevConnection(BaseConnection): if not self._io_watcher: self._io_watcher = \ self.ioloop.io(self.socket.fileno(), - self._PIKA_TO_LIBEV_ARRAY[self.event_state], - self._handle_events) + self._PIKA_TO_LIBEV_ARRAY[self.event_state], + self._handle_events) self.async = pyev.Async(self.ioloop, self._noop_callable) self.async.start() @@ -209,8 +209,9 @@ class LibevConnection(BaseConnection): self._PIKA_TO_LIBEV_ARRAY[self.event_state]) break - except: # sometimes the stop() doesn't complete in time - if retries > 5: raise + except Exception: # sometimes the stop() doesn't complete in time + if retries > 5: + raise self._io_watcher.stop() # so try it again retries += 1 @@ -268,7 +269,7 @@ class LibevConnection(BaseConnection): :rtype: timer instance handle. 
""" - LOGGER.debug('deadline: {0}'.format(deadline)) + LOGGER.debug('deadline: %s', deadline) timer = self._get_timer(deadline) self._active_timers[timer] = (callback_method, callback_timeout, callback_kwargs) diff --git a/pika/channel.py b/pika/channel.py index 641e469..5c67c49 100644 --- a/pika/channel.py +++ b/pika/channel.py @@ -436,7 +436,10 @@ class Channel(object): For more information see: http://www.rabbitmq.com/extensions.html#confirms - :param method callback: The callback for delivery confirmations + :param method callback: The callback for delivery confirmations that has + the following signature: callback(pika.frame.Method), where + method_frame contains either method `spec.Basic.Ack` or + `spec.Basic.Nack` :param bool nowait: Do not send a reply frame (Confirm.SelectOk) """ @@ -674,7 +677,8 @@ class Channel(object): Leave the queue name empty for a auto-named queue in RabbitMQ - :param method callback: The method to call on Queue.DeclareOk + :param method callback: callback(pika.frame.Method) for method + Queue.DeclareOk :param queue: The queue name :type queue: str or unicode :param bool passive: Only check to see if the queue exists @@ -694,7 +698,8 @@ class Channel(object): self._validate_channel_and_callback(callback) return self._rpc(spec.Queue.Declare(0, queue, passive, durable, exclusive, auto_delete, nowait, - arguments or dict()), callback, + arguments or dict()), + callback, replies) def queue_delete(self, @@ -1087,52 +1092,74 @@ class Channel(object): """ LOGGER.debug('%i blocked frames', len(self._blocked)) self._blocking = None - while len(self._blocked) > 0 and self._blocking is None: + while self._blocked and self._blocking is None: self._rpc(*self._blocked.popleft()) def _rpc(self, method_frame, callback=None, acceptable_replies=None): - """Shortcut wrapper to the Connection's rpc command using its callback - stack, passing in our channel number. + """Make a syncronous channel RPC call for a synchronous method frame. 
If + the channel is already in the blocking state, then enqueue the request, + but don't send it at this time; it will be eventually sent by + `_on_synchronous_complete` after the prior blocking request receives a + resposne. If the channel is not in the blocking state and + `acceptable_replies` is not empty, transition the channel to the + blocking state and register for `_on_synchronous_complete` before + sending the request. + + NOTE: A populated callback must be accompanied by populated + acceptable_replies. :param pika.amqp_object.Method method_frame: The method frame to call :param method callback: The callback for the RPC response :param list acceptable_replies: The replies this RPC call expects """ + assert method_frame.synchronous, ( + 'Only synchronous-capable frames may be used with _rpc: %r' + % (method_frame,)) + + # Validate we got None or a list of acceptable_replies + if not isinstance(acceptable_replies, (type(None), list)): + raise TypeError('acceptable_replies should be list or None') + + # Validate the callback is callable + if callback is not None and not is_callable(callback): + raise TypeError('callback should be None, a function or method.') + + if callback is not None and not acceptable_replies: + raise ValueError('A populated callback must be accompanied by ' + 'populated acceptable_replies') + # Make sure the channel is open if self.is_closed: raise exceptions.ChannelClosed # If the channel is blocking, add subsequent commands to our stack if self._blocking: + LOGGER.debug('Already in blocking state, so enqueueing frame %s; ' + 'acceptable_replies=%r', + method_frame, acceptable_replies) return self._blocked.append([method_frame, callback, acceptable_replies]) - # Validate we got None or a list of acceptable_replies - if acceptable_replies and not isinstance(acceptable_replies, list): - raise TypeError("acceptable_replies should be list or None") - - # Validate the callback is callable - if callback and not is_callable(callback): - raise 
TypeError("callback should be None, a function or method.") - - # Block until a response frame is received for synchronous frames - if method_frame.synchronous: - self._blocking = method_frame.NAME - # If acceptable replies are set, add callbacks if acceptable_replies: - for reply in acceptable_replies or list(): + # Block until a response frame is received for synchronous frames + self._blocking = method_frame.NAME + LOGGER.debug( + 'Entering blocking state on frame %s; acceptable_replies=%r', + method_frame, acceptable_replies) + + for reply in acceptable_replies: if isinstance(reply, tuple): reply, arguments = reply else: arguments = None - LOGGER.debug('Adding in on_synchronous_complete callback') + LOGGER.debug('Adding on_synchronous_complete callback') self.callbacks.add(self.channel_number, reply, self._on_synchronous_complete, arguments=arguments) - if callback: - LOGGER.debug('Adding passed in callback') + if callback is not None: + LOGGER.debug('Adding passed-in callback') self.callbacks.add(self.channel_number, reply, callback, arguments=arguments)
Regression: Cannot receive long running messages when in use in tornado app Hi there, we just noticed a regression from pika 0.9.14 to 0.10.0. We haven't been able to track down the problem further than this (sorry) but will instead stop for now by pinning the old version. The problem seems to be that pika (used via tornado-celery) is not able to receive answer messages from rabbitmq for longish running (>4-5 seconds) requests to a background worker. The answer message instead seems to time out after the message timeout period in rabbitmq after which it is lost. Do you need more information to fix this?
pika/pika
diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py index 9d7448e..9a44208 100644 --- a/tests/acceptance/async_adapter_tests.py +++ b/tests/acceptance/async_adapter_tests.py @@ -1,13 +1,25 @@ +# Suppress pylint messages concerning missing class and method docstrings +# pylint: disable=C0111 + +# Suppress pylint warning about attribute defined outside __init__ +# pylint: disable=W0201 + +# Suppress pylint warning about access to protected member +# pylint: disable=W0212 + +# Suppress pylint warning about unused argument +# pylint: disable=W0613 + import time import uuid -from pika import spec, URLParameters +from pika import spec from pika.compat import as_bytes from async_test_base import (AsyncTestCase, BoundQueueTestCase, AsyncAdapters) -class TestA_Connect(AsyncTestCase, AsyncAdapters): +class TestA_Connect(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Connect, open channel and disconnect" def begin(self, channel): @@ -26,11 +38,49 @@ class TestConfirmSelect(AsyncTestCase, AsyncAdapters): self.stop() +class TestBlockingNonBlockingBlockingRPCWontStall(AsyncTestCase, AsyncAdapters): + DESCRIPTION = ("Verify that a sequence of blocking, non-blocking, blocking " + "RPC requests won't stall") + + def begin(self, channel): + # Queue declaration params table: queue name, nowait value + self._expected_queue_params = ( + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False), + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, True), + ("blocking-non-blocking-stall-check-" + uuid.uuid1().hex, False) + ) + + self._declared_queue_names = [] + + for queue, nowait in self._expected_queue_params: + channel.queue_declare(callback=self._queue_declare_ok_cb + if not nowait else None, + queue=queue, + auto_delete=True, + nowait=nowait, + arguments={'x-expires': self.TIMEOUT * 1000}) + + def _queue_declare_ok_cb(self, declare_ok_frame): + 
self._declared_queue_names.append(declare_ok_frame.method.queue) + + if len(self._declared_queue_names) == 2: + # Initiate check for creation of queue declared with nowait=True + self.channel.queue_declare(callback=self._queue_declare_ok_cb, + queue=self._expected_queue_params[1][0], + passive=True, + nowait=False) + elif len(self._declared_queue_names) == 3: + self.assertSequenceEqual( + sorted(self._declared_queue_names), + sorted(item[0] for item in self._expected_queue_params)) + self.stop() + + class TestConsumeCancel(AsyncTestCase, AsyncAdapters): DESCRIPTION = "Consume and cancel" def begin(self, channel): - self.queue_name = str(uuid.uuid4()) + self.queue_name = self.__class__.__name__ + ':' + uuid.uuid1().hex channel.queue_declare(self.on_queue_declared, queue=self.queue_name) def on_queue_declared(self, frame): @@ -58,7 +108,7 @@ class TestExchangeDeclareAndDelete(AsyncTestCase, AsyncAdapters): X_TYPE = 'direct' def begin(self, channel): - self.name = self.__class__.__name__ + ':' + str(id(self)) + self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex channel.exchange_declare(self.on_exchange_declared, self.name, exchange_type=self.X_TYPE, passive=False, @@ -81,7 +131,7 @@ class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): X_TYPE2 = 'topic' def begin(self, channel): - self.name = self.__class__.__name__ + ':' + str(id(self)) + self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex self.channel.add_on_close_callback(self.on_channel_closed) channel.exchange_declare(self.on_exchange_declared, self.name, exchange_type=self.X_TYPE1, @@ -97,7 +147,7 @@ class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): self.connection.channel(self.on_cleanup_channel) def on_exchange_declared(self, frame): - self.channel.exchange_declare(self.on_exchange_declared, self.name, + self.channel.exchange_declare(self.on_bad_result, self.name, exchange_type=self.X_TYPE2, passive=False, durable=False, @@ -134,7 +184,8 @@ 
class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters): DESCRIPTION = "Create and delete a named queue" def begin(self, channel): - channel.queue_declare(self.on_queue_declared, str(id(self)), + self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex + channel.queue_declare(self.on_queue_declared, self._q_name, passive=False, durable=False, exclusive=True, @@ -143,10 +194,9 @@ class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters): arguments={'x-expires': self.TIMEOUT * 1000}) def on_queue_declared(self, frame): - queue = str(id(self)) self.assertIsInstance(frame.method, spec.Queue.DeclareOk) # Frame's method's queue is encoded (impl detail) - self.assertEqual(frame.method.queue, queue) + self.assertEqual(frame.method.queue, self._q_name) self.channel.queue_delete(self.on_queue_delete, frame.method.queue) def on_queue_delete(self, frame): @@ -159,8 +209,9 @@ class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): DESCRIPTION = "Should close chan: re-declared queue w/ diff params" def begin(self, channel): + self._q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex self.channel.add_on_close_callback(self.on_channel_closed) - channel.queue_declare(self.on_queue_declared, str(id(self)), + channel.queue_declare(self.on_queue_declared, self._q_name, passive=False, durable=False, exclusive=True, @@ -172,7 +223,7 @@ class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): self.stop() def on_queue_declared(self, frame): - self.channel.queue_declare(self.on_bad_result, str(id(self)), + self.channel.queue_declare(self.on_bad_result, self._q_name, passive=False, durable=True, exclusive=False, @@ -181,13 +232,13 @@ class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters): arguments={'x-expires': self.TIMEOUT * 1000}) def on_bad_result(self, frame): - self.channel.queue_delete(None, str(id(self)), nowait=True) + self.channel.queue_delete(None, self._q_name, nowait=True) raise 
AssertionError("Should not have received a Queue.DeclareOk") -class TestTX1_Select(AsyncTestCase, AsyncAdapters): - DESCRIPTION="Receive confirmation of Tx.Select" +class TestTX1_Select(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Receive confirmation of Tx.Select" def begin(self, channel): channel.tx_select(self.on_complete) @@ -198,8 +249,8 @@ class TestTX1_Select(AsyncTestCase, AsyncAdapters): -class TestTX2_Commit(AsyncTestCase, AsyncAdapters): - DESCRIPTION="Start a transaction, and commit it" +class TestTX2_Commit(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 + DESCRIPTION = "Start a transaction, and commit it" def begin(self, channel): channel.tx_select(self.on_selectok) @@ -213,7 +264,7 @@ class TestTX2_Commit(AsyncTestCase, AsyncAdapters): self.stop() -class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): +class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Close the channel: commit without a TX" def begin(self, channel): @@ -226,11 +277,12 @@ class TestTX2_CommitFailure(AsyncTestCase, AsyncAdapters): def on_selectok(self, frame): self.assertIsInstance(frame.method, spec.Tx.SelectOk) - def on_commitok(self, frame): + @staticmethod + def on_commitok(frame): raise AssertionError("Should not have received a Tx.CommitOk") -class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): +class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Start a transaction, then rollback" def begin(self, channel): @@ -246,7 +298,7 @@ class TestTX3_Rollback(AsyncTestCase, AsyncAdapters): -class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): +class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Close the channel: rollback without a TX" def begin(self, channel): @@ -256,12 +308,12 @@ class TestTX3_RollbackFailure(AsyncTestCase, AsyncAdapters): def on_channel_closed(self, channel, reply_code, reply_text): 
self.stop() - def on_commitok(self, frame): + @staticmethod + def on_commitok(frame): raise AssertionError("Should not have received a Tx.RollbackOk") - -class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): +class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Publish a message and consume it" def on_ready(self, frame): @@ -282,10 +334,11 @@ class TestZ_PublishAndConsume(BoundQueueTestCase, AsyncAdapters): -class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): +class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Publish a big message and consume it" - def _get_msg_body(self): + @staticmethod + def _get_msg_body(): return '\n'.join(["%s" % i for i in range(0, 2097152)]) def on_ready(self, frame): @@ -305,7 +358,7 @@ class TestZ_PublishAndConsumeBig(BoundQueueTestCase, AsyncAdapters): self.channel.basic_cancel(self.on_cancelled, self.ctag) -class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): +class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Publish a message and get it" def on_ready(self, frame): @@ -321,13 +374,14 @@ class TestZ_PublishAndGet(BoundQueueTestCase, AsyncAdapters): self.stop() -class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters): +class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103 DESCRIPTION = "Verify that access denied invokes on open error callback" def start(self, *args, **kwargs): self.parameters.virtual_host = str(uuid.uuid4()) self.error_captured = False super(TestZ_AccessDenied, self).start(*args, **kwargs) + self.assertTrue(self.error_captured) def on_open_error(self, connection, error): self.error_captured = True @@ -336,7 +390,3 @@ class TestZ_AccessDenied(AsyncTestCase, AsyncAdapters): def on_open(self, connection): super(TestZ_AccessDenied, self).on_open(connection) self.stop() - - def tearDown(self): - 
self.assertTrue(self.error_captured) - super(TestZ_AccessDenied, self).tearDown() diff --git a/tests/acceptance/async_test_base.py b/tests/acceptance/async_test_base.py index 26c3e1a..eafe72f 100644 --- a/tests/acceptance/async_test_base.py +++ b/tests/acceptance/async_test_base.py @@ -1,3 +1,10 @@ +# Suppress pylint warnings concerning attribute defined outside __init__ +# pylint: disable=W0201 + +# Suppress pylint messages concerning missing docstrings +# pylint: disable=C0111 + +from datetime import datetime import select import logging try: @@ -6,7 +13,9 @@ except ImportError: import unittest import platform -target = platform.python_implementation() +_TARGET = platform.python_implementation() + +import uuid import pika from pika import adapters @@ -24,6 +33,9 @@ class AsyncTestCase(unittest.TestCase): 'amqp://guest:guest@localhost:5672/%2F') super(AsyncTestCase, self).setUp() + def tearDown(self): + self._stop() + def shortDescription(self): method_desc = super(AsyncTestCase, self).shortDescription() if self.DESCRIPTION: @@ -31,11 +43,12 @@ class AsyncTestCase(unittest.TestCase): else: return method_desc - def begin(self, channel): + def begin(self, channel): # pylint: disable=R0201,W0613 """Extend to start the actual tests on the channel""" - raise AssertionError("AsyncTestCase.begin_test not extended") + self.fail("AsyncTestCase.begin_test not extended") def start(self, adapter=None): + self.logger.info('start at %s', datetime.utcnow()) self.adapter = adapter or self.ADAPTER self.connection = self.adapter(self.parameters, self.on_open, @@ -53,19 +66,18 @@ class AsyncTestCase(unittest.TestCase): def _stop(self): if hasattr(self, 'timeout') and self.timeout: + self.logger.info("Removing timeout") self.connection.remove_timeout(self.timeout) self.timeout = None if hasattr(self, 'connection') and self.connection: + self.logger.info("Stopping ioloop") self.connection.ioloop.stop() self.connection = None - def tearDown(self): - self._stop() - def on_closed(self, 
connection, reply_code, reply_text): """called when the connection has finished closing""" - self.logger.debug('on_closed: %r %r %r', connection, - reply_code, reply_text) + self.logger.info('on_closed: %r %r %r', connection, + reply_code, reply_text) self._stop() def on_open(self, connection): @@ -73,29 +85,25 @@ class AsyncTestCase(unittest.TestCase): self.channel = connection.channel(self.begin) def on_open_error(self, connection, error): - self.logger.debug('on_open_error: %r %r', connection, error) + self.logger.error('on_open_error: %r %r', connection, error) connection.ioloop.stop() raise AssertionError('Error connecting to RabbitMQ') def on_timeout(self): """called when stuck waiting for connection to close""" + self.logger.info('on_timeout at %s', datetime.utcnow()) # force the ioloop to stop - self.logger.debug('on_timeout') + self.logger.debug('on_timeout called') self.connection.ioloop.stop() raise AssertionError('Test timed out') class BoundQueueTestCase(AsyncTestCase): - def tearDown(self): - """Cleanup auto-declared queue and exchange""" - self._cconn = self.adapter(self.parameters, self._on_cconn_open, - self._on_cconn_error, self._on_cconn_closed) - def start(self, adapter=None): # PY3 compat encoding - self.exchange = 'e' + str(id(self)) - self.queue = 'q' + str(id(self)) + self.exchange = 'e-' + self.__class__.__name__ + ':' + uuid.uuid1().hex + self.queue = 'q-' + self.__class__.__name__ + ':' + uuid.uuid1().hex self.routing_key = self.__class__.__name__ super(BoundQueueTestCase, self).start(adapter) @@ -106,82 +114,70 @@ class BoundQueueTestCase(AsyncTestCase): durable=False, auto_delete=True) - def on_exchange_declared(self, frame): + def on_exchange_declared(self, frame): # pylint: disable=W0613 self.channel.queue_declare(self.on_queue_declared, self.queue, passive=False, durable=False, exclusive=True, auto_delete=True, nowait=False, - arguments={'x-expires': self.TIMEOUT * 1000} - ) + arguments={'x-expires': self.TIMEOUT * 1000}) - def 
on_queue_declared(self, frame): + def on_queue_declared(self, frame): # pylint: disable=W0613 self.channel.queue_bind(self.on_ready, self.queue, self.exchange, self.routing_key) def on_ready(self, frame): raise NotImplementedError - def _on_cconn_closed(self, cconn, *args, **kwargs): - cconn.ioloop.stop() - self._cconn = None - - def _on_cconn_error(self, connection): - connection.ioloop.stop() - raise AssertionError('Error connecting to RabbitMQ') - - def _on_cconn_open(self, connection): - connection.channel(self._on_cconn_channel) - - def _on_cconn_channel(self, channel): - channel.exchange_delete(None, self.exchange, nowait=True) - channel.queue_delete(None, self.queue, nowait=True) - self._cconn.close() # # In order to write test cases that will tested using all the Async Adapters -# write a class that inherits both from one of TestCase classes above and +# write a class that inherits both from one of TestCase classes above and # from the AsyncAdapters class below. This allows you to avoid duplicating the # test methods for each adapter in each test class. 
# class AsyncAdapters(object): + def start(self, adapter_class): + raise NotImplementedError + def select_default_test(self): "SelectConnection:DefaultPoller" - select_connection.POLLER_TYPE=None + select_connection.POLLER_TYPE = None self.start(adapters.SelectConnection) def select_select_test(self): "SelectConnection:select" - select_connection.POLLER_TYPE='select' + select_connection.POLLER_TYPE = 'select' self.start(adapters.SelectConnection) - @unittest.skipIf(not hasattr(select, 'poll') - or not hasattr(select.poll(), 'modify'), "poll not supported") + @unittest.skipIf( + not hasattr(select, 'poll') or + not hasattr(select.poll(), 'modify'), "poll not supported") # pylint: disable=E1101 def select_poll_test(self): "SelectConnection:poll" - select_connection.POLLER_TYPE='poll' + select_connection.POLLER_TYPE = 'poll' self.start(adapters.SelectConnection) @unittest.skipIf(not hasattr(select, 'epoll'), "epoll not supported") def select_epoll_test(self): "SelectConnection:epoll" - select_connection.POLLER_TYPE='epoll' + select_connection.POLLER_TYPE = 'epoll' self.start(adapters.SelectConnection) @unittest.skipIf(not hasattr(select, 'kqueue'), "kqueue not supported") def select_kqueue_test(self): "SelectConnection:kqueue" - select_connection.POLLER_TYPE='kqueue' + select_connection.POLLER_TYPE = 'kqueue' self.start(adapters.SelectConnection) def tornado_test(self): "TornadoConnection" self.start(adapters.TornadoConnection) - @unittest.skipIf(target == 'PyPy', 'PyPy is not supported') + @unittest.skipIf(_TARGET == 'PyPy', 'PyPy is not supported') @unittest.skipIf(adapters.LibevConnection is None, 'pyev is not installed') def libev_test(self): "LibevConnection" diff --git a/tests/unit/channel_tests.py b/tests/unit/channel_tests.py index a80996a..57c5178 100644 --- a/tests/unit/channel_tests.py +++ b/tests/unit/channel_tests.py @@ -2,6 +2,10 @@ Tests for pika.channel.ContentFrameDispatcher """ + +# Disable pylint warning about Access to a protected member +# pylint: 
disable=W0212 + import collections import logging @@ -16,17 +20,35 @@ except ImportError: import unittest import warnings +from pika import callback from pika import channel +from pika import connection from pika import exceptions from pika import frame from pika import spec +class ConnectionTemplate(connection.Connection): + """Template for using as mock spec_set for the pika Connection class. It + defines members accessed by the code under test that would be defined in the + base class's constructor. + """ + callbacks = None + + # Suppress pylint warnings about specific abstract methods not being + # overridden + _adapter_connect = connection.Connection._adapter_connect + _adapter_disconnect = connection.Connection._adapter_disconnect + _flush_outbound = connection.Connection._flush_outbound + add_timeout = connection.Connection.add_timeout + remove_timeout = connection.Connection.remove_timeout + + class ChannelTests(unittest.TestCase): - @mock.patch('pika.connection.Connection') - def _create_connection(self, connection=None): - return connection + @mock.patch('pika.connection.Connection', autospec=ConnectionTemplate) + def _create_connection(self, connectionClassMock=None): + return connectionClassMock() def setUp(self): self.connection = self._create_connection() @@ -440,14 +462,12 @@ class ChannelTests(unittest.TestCase): self.obj.confirm_delivery(logging.debug) self.obj.callbacks.add.assert_called_with(*expectation, arguments=None) - def test_confirm_delivery_callback_with_nowait(self): + def test_confirm_delivery_callback_with_nowait_raises_value_error(self): self.obj._set_state(self.obj.OPEN) expectation = [self.obj.channel_number, spec.Confirm.SelectOk, self.obj._on_selectok] - self.obj.confirm_delivery(logging.debug, True) - self.assertNotIn(mock.call(*expectation, - arguments=None), - self.obj.callbacks.add.call_args_list) + with self.assertRaises(ValueError): + self.obj.confirm_delivery(logging.debug, True) def 
test_confirm_delivery_callback_basic_ack(self): self.obj._set_state(self.obj.OPEN) @@ -847,7 +867,6 @@ class ChannelTests(unittest.TestCase): def test_add_callbacks_basic_get_empty_added(self): self.obj._add_callbacks() - print(self.obj.callbacks.add.__dict__) self.obj.callbacks.add.assert_any_call(self.obj.channel_number, spec.Basic.GetEmpty, self.obj._on_getempty, False) @@ -1153,20 +1172,22 @@ class ChannelTests(unittest.TestCase): def test_rpc_raises_channel_closed(self): self.assertRaises(exceptions.ChannelClosed, self.obj._rpc, - frame.Method(self.obj.channel_number, - spec.Basic.Ack(1))) + spec.Basic.Cancel('tag_abc')) def test_rpc_while_blocking_appends_blocked_collection(self): self.obj._set_state(self.obj.OPEN) self.obj._blocking = spec.Confirm.Select() - expectation = [frame.Method(self.obj.channel_number, spec.Basic.Ack(1)), - 'Foo', None] + acceptable_replies = [ + (spec.Basic.CancelOk, {'consumer_tag': 'tag_abc'})] + expectation = [spec.Basic.Cancel('tag_abc'), lambda *args: None, + acceptable_replies] self.obj._rpc(*expectation) self.assertIn(expectation, self.obj._blocked) def test_rpc_throws_value_error_with_unacceptable_replies(self): self.obj._set_state(self.obj.OPEN) - self.assertRaises(TypeError, self.obj._rpc, spec.Basic.Ack(1), + self.assertRaises(TypeError, self.obj._rpc, + spec.Basic.Cancel('tag_abc'), logging.debug, 'Foo') def test_rpc_throws_type_error_with_invalid_callback(self): @@ -1174,15 +1195,27 @@ class ChannelTests(unittest.TestCase): self.assertRaises(TypeError, self.obj._rpc, spec.Channel.Open(1), ['foo'], [spec.Channel.OpenOk]) - def test_rpc_adds_on_synchronous_complete(self): + def test_rpc_enters_blocking_and_adds_on_synchronous_complete(self): self.obj._set_state(self.obj.OPEN) method_frame = spec.Channel.Open() self.obj._rpc(method_frame, None, [spec.Channel.OpenOk]) + self.assertEqual(self.obj._blocking, method_frame.NAME) self.obj.callbacks.add.assert_called_with( self.obj.channel_number, spec.Channel.OpenOk, 
self.obj._on_synchronous_complete, arguments=None) + def test_rpc_not_blocking_and_no_on_synchronous_complete_when_no_replies(self): + self.obj._set_state(self.obj.OPEN) + method_frame = spec.Channel.Open() + self.obj._rpc(method_frame, None, acceptable_replies=[]) + self.assertIsNone(self.obj._blocking) + with self.assertRaises(AssertionError): + self.obj.callbacks.add.assert_called_with( + mock.ANY, mock.ANY, + self.obj._on_synchronous_complete, + arguments=mock.ANY) + def test_rpc_adds_callback(self): self.obj._set_state(self.obj.OPEN) method_frame = spec.Channel.Open()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 2 }
0.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "yapf", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc libev-dev" ], "python": "3.5", "reqs_path": [ "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 mock==5.2.0 nose==1.3.7 packaging==21.3 -e git+https://github.com/pika/pika.git@d8a782d97579cd96ed67ccfb55f63ca8fdafa199#egg=pika pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 tomli==1.2.3 tornado==6.1 Twisted==15.3.0 typing_extensions==4.1.1 urllib3==1.26.20 yapf==0.32.0 zipp==3.6.0 zope.interface==5.5.2
name: pika channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mock==5.2.0 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - tomli==1.2.3 - tornado==6.1 - twisted==15.3.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - yapf==0.32.0 - zipp==3.6.0 - zope-interface==5.5.2 prefix: /opt/conda/envs/pika
[ "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_with_nowait_raises_value_error", "tests/unit/channel_tests.py::ChannelTests::test_rpc_not_blocking_and_no_on_synchronous_complete_when_no_replies" ]
[]
[ "tests/unit/channel_tests.py::ChannelTests::test_add_callback", "tests/unit/channel_tests.py::ChannelTests::test_add_callback_multiple_replies", "tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_basic_cancel_empty_added", "tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_basic_get_empty_added", "tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_channel_close_added", "tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_channel_flow_added", "tests/unit/channel_tests.py::ChannelTests::test_add_on_cancel_callback", "tests/unit/channel_tests.py::ChannelTests::test_add_on_close_callback", "tests/unit/channel_tests.py::ChannelTests::test_add_on_flow_callback", "tests/unit/channel_tests.py::ChannelTests::test_add_on_return_callback", "tests/unit/channel_tests.py::ChannelTests::test_basic_ack_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_callback_appended", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_calls_validate", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_channel_cancelled_appended", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_no_consumer_tag", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_on_cancel_appended", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_raises_value_error", "tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_then_close", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_calls_validate", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_cancelled_full", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_in_consumers", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_callback_value", 
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_pending_list_is_empty", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_rpc_called", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_duplicate_consumer_tag_raises", "tests/unit/channel_tests.py::ChannelTests::test_basic_consume_has_pending_list", "tests/unit/channel_tests.py::ChannelTests::test_basic_get_callback", "tests/unit/channel_tests.py::ChannelTests::test_basic_get_calls_validate", "tests/unit/channel_tests.py::ChannelTests::test_basic_get_send_method_called", "tests/unit/channel_tests.py::ChannelTests::test_basic_nack_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_nack_send_method_request", "tests/unit/channel_tests.py::ChannelTests::test_basic_publish_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_publish_send_method_request", "tests/unit/channel_tests.py::ChannelTests::test_basic_qos_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_qos_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_basic_recover_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_recover_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_basic_reject_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_basic_reject_send_method_request", "tests/unit/channel_tests.py::ChannelTests::test_basic_send_method_calls_rpc", "tests/unit/channel_tests.py::ChannelTests::test_channel_open_add_callbacks_called", "tests/unit/channel_tests.py::ChannelTests::test_cleanup", "tests/unit/channel_tests.py::ChannelTests::test_close_basic_cancel_called", "tests/unit/channel_tests.py::ChannelTests::test_close_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_close_state", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_basic_ack", 
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_basic_nack", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_without_nowait_selectok", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_callback_call_count", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_no_basic_ack_callback", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_no_basic_nack_callback", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_method_not_implemented_for_confirms", "tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_method_not_implemented_for_nack", "tests/unit/channel_tests.py::ChannelTests::test_consumer_tags", "tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_with_type_arg_assigns_to_exchange_type", "tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_with_type_arg_raises_deprecation_warning", "tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_raises_value_error_on_invalid_callback", 
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_flow_off_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_flow_on_flowok_callback", "tests/unit/channel_tests.py::ChannelTests::test_flow_on_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_flow_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_flow_raises_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_get_pending_message", "tests/unit/channel_tests.py::ChannelTests::test_get_pending_message_item_popped", "tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_deliver_called", "tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_get_called", "tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_return_called", "tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_method_returns_none", "tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_sets_header_frame", "tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_sets_method_frame", "tests/unit/channel_tests.py::ChannelTests::test_has_content_false", "tests/unit/channel_tests.py::ChannelTests::test_has_content_true", "tests/unit/channel_tests.py::ChannelTests::test_immediate_called_logger_warning", "tests/unit/channel_tests.py::ChannelTests::test_init_blocked", "tests/unit/channel_tests.py::ChannelTests::test_init_blocking", 
"tests/unit/channel_tests.py::ChannelTests::test_init_callbacks", "tests/unit/channel_tests.py::ChannelTests::test_init_cancelled", "tests/unit/channel_tests.py::ChannelTests::test_init_channel_number", "tests/unit/channel_tests.py::ChannelTests::test_init_connection", "tests/unit/channel_tests.py::ChannelTests::test_init_consumers", "tests/unit/channel_tests.py::ChannelTests::test_init_frame_dispatcher", "tests/unit/channel_tests.py::ChannelTests::test_init_has_on_flow_callback", "tests/unit/channel_tests.py::ChannelTests::test_init_invalid_channel_number", "tests/unit/channel_tests.py::ChannelTests::test_init_on_flowok_callback", "tests/unit/channel_tests.py::ChannelTests::test_init_on_getok_callback", "tests/unit/channel_tests.py::ChannelTests::test_init_on_openok_callback", "tests/unit/channel_tests.py::ChannelTests::test_init_pending", "tests/unit/channel_tests.py::ChannelTests::test_init_state", "tests/unit/channel_tests.py::ChannelTests::test_is_closed_false", "tests/unit/channel_tests.py::ChannelTests::test_is_closed_true", "tests/unit/channel_tests.py::ChannelTests::test_is_closing_false", "tests/unit/channel_tests.py::ChannelTests::test_is_closing_true", "tests/unit/channel_tests.py::ChannelTests::test_on_cancel_not_appended_cancelled", "tests/unit/channel_tests.py::ChannelTests::test_on_cancel_removed_consumer", "tests/unit/channel_tests.py::ChannelTests::test_on_cancelok_removed_consumer", "tests/unit/channel_tests.py::ChannelTests::test_on_cancelok_removed_pending", "tests/unit/channel_tests.py::ChannelTests::test_on_close_warning", "tests/unit/channel_tests.py::ChannelTests::test_on_confirm_selectok", "tests/unit/channel_tests.py::ChannelTests::test_on_deliver_callback_called", "tests/unit/channel_tests.py::ChannelTests::test_on_deliver_pending_callbacks_called", "tests/unit/channel_tests.py::ChannelTests::test_on_deliver_pending_called", "tests/unit/channel_tests.py::ChannelTests::test_on_eventok", 
"tests/unit/channel_tests.py::ChannelTests::test_on_flow", "tests/unit/channel_tests.py::ChannelTests::test_on_flow_with_callback", "tests/unit/channel_tests.py::ChannelTests::test_on_flowok", "tests/unit/channel_tests.py::ChannelTests::test_on_flowok_callback_reset", "tests/unit/channel_tests.py::ChannelTests::test_on_flowok_calls_callback", "tests/unit/channel_tests.py::ChannelTests::test_on_getempty", "tests/unit/channel_tests.py::ChannelTests::test_on_getok_callback_called", "tests/unit/channel_tests.py::ChannelTests::test_on_getok_callback_reset", "tests/unit/channel_tests.py::ChannelTests::test_on_getok_no_callback", "tests/unit/channel_tests.py::ChannelTests::test_on_openok_callback_called", "tests/unit/channel_tests.py::ChannelTests::test_on_openok_no_callback", "tests/unit/channel_tests.py::ChannelTests::test_on_synchronous_complete", "tests/unit/channel_tests.py::ChannelTests::test_onreturn", "tests/unit/channel_tests.py::ChannelTests::test_onreturn_warning", "tests/unit/channel_tests.py::ChannelTests::test_queue_bind_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_queue_bind_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_queue_bind_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_queue_bind_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_queue_declare_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_queue_declare_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_queue_declare_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_queue_declare_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_queue_delete_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_queue_delete_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_queue_delete_rpc_request", 
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_queue_purge_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_queue_purge_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_queue_purge_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_queue_purge_rpc_request_nowait", "tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_raises_value_error_on_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_rpc_adds_callback", "tests/unit/channel_tests.py::ChannelTests::test_rpc_enters_blocking_and_adds_on_synchronous_complete", "tests/unit/channel_tests.py::ChannelTests::test_rpc_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_rpc_throws_type_error_with_invalid_callback", "tests/unit/channel_tests.py::ChannelTests::test_rpc_throws_value_error_with_unacceptable_replies", "tests/unit/channel_tests.py::ChannelTests::test_rpc_while_blocking_appends_blocked_collection", "tests/unit/channel_tests.py::ChannelTests::test_send_method", "tests/unit/channel_tests.py::ChannelTests::test_set_state", "tests/unit/channel_tests.py::ChannelTests::test_tx_commit_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_tx_commit_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_tx_rollback_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_tx_select_rpc_request", "tests/unit/channel_tests.py::ChannelTests::test_validate_channel_and_callback_raises_channel_closed", "tests/unit/channel_tests.py::ChannelTests::test_validate_channel_and_callback_raises_value_error_not_callable" ]
[]
BSD 3-Clause "New" or "Revised" License
342
jupyter-incubator__sparkmagic-84
3c0230b30b1f63780fbf2d4ba9d83e0a83f51eea
2015-12-19 08:44:24
3c0230b30b1f63780fbf2d4ba9d83e0a83f51eea
diff --git a/remotespark/default_config.json b/remotespark/default_config.json index 3be83bf..ea4adaa 100644 --- a/remotespark/default_config.json +++ b/remotespark/default_config.json @@ -47,5 +47,10 @@ "fatal_error_suggestion": "The code failed because of a fatal error:\n\t{}.\n\nSome things to try:\na) Make sure Spark has enough available resources for Jupyter to create a Spark context.\nb) Contact your Jupyter administrator to make sure the Spark magics library is configured correctly.\nc) Restart the kernel.", - "ignore_ssl_errors": false + "ignore_ssl_errors": false, + + "session_configs": { + "driverMemory": "1000M", + "executorCores": 2 + } } diff --git a/remotespark/livyclientlib/clientmanagerstateserializer.py b/remotespark/livyclientlib/clientmanagerstateserializer.py index 8a04bce..f4615ea 100644 --- a/remotespark/livyclientlib/clientmanagerstateserializer.py +++ b/remotespark/livyclientlib/clientmanagerstateserializer.py @@ -38,11 +38,11 @@ class ClientManagerStateSerializer(object): name = client["name"] session_id = client["id"] sql_context_created = client["sqlcontext"] - language = client["language"] + kind = client["kind"].lower() connection_string = client["connectionstring"] session = self._client_factory.create_session( - language, connection_string, session_id, sql_context_created) + connection_string, session_id, sql_context_created, {"kind": kind}) # Do not start session automatically. Just create it but skip is not existent. try: @@ -50,7 +50,7 @@ class ClientManagerStateSerializer(object): status = session.status if not session.is_final_status(status): self.logger.debug("Adding session {}".format(session_id)) - client_obj = self._client_factory.build_client(language, session) + client_obj = self._client_factory.build_client(session) clients_to_return.append((name, client_obj)) else: self.logger.error("Skipping serialized session '{}' because session was in status {}." 
diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py index 6632b2a..f216c44 100644 --- a/remotespark/livyclientlib/livyclient.py +++ b/remotespark/livyclientlib/livyclient.py @@ -34,8 +34,8 @@ class LivyClient(object): self._session.delete() @property - def language(self): - return self._session.language + def kind(self): + return self._session.kind @property def session_id(self): diff --git a/remotespark/livyclientlib/livyclientfactory.py b/remotespark/livyclientlib/livyclientfactory.py index 7599ddd..f7b2ae3 100644 --- a/remotespark/livyclientlib/livyclientfactory.py +++ b/remotespark/livyclientlib/livyclientfactory.py @@ -17,23 +17,24 @@ class LivyClientFactory(object): self.logger = Log("LivyClientFactory") self.max_results = 2500 - def build_client(self, language, session): + def build_client(self, session): assert session is not None + kind = session.kind - if language == Constants.lang_python: + if kind == Constants.session_kind_pyspark: return PandasPysparkLivyClient(session, self.max_results) - elif language == Constants.lang_scala: + elif kind == Constants.session_kind_spark: return PandasScalaLivyClient(session, self.max_results) else: - raise ValueError("Language '{}' is not supported.".format(language)) + raise ValueError("Kind '{}' is not supported.".format(kind)) @staticmethod - def create_session(language, connection_string, session_id="-1", sql_created=False): + def create_session(connection_string, properties, session_id="-1", sql_created=False): cso = get_connection_string_elements(connection_string) retry_policy = LinearRetryPolicy(seconds_to_sleep=5, max_retries=5) http_client = LivyReliableHttpClient(cso.url, cso.username, cso.password, retry_policy) - session = LivySession(http_client, language, session_id, sql_created) + session = LivySession(http_client, session_id, sql_created, properties) return session diff --git a/remotespark/livyclientlib/livysession.py 
b/remotespark/livyclientlib/livysession.py index 247ab84..7c8efaf 100644 --- a/remotespark/livyclientlib/livysession.py +++ b/remotespark/livyclientlib/livysession.py @@ -7,7 +7,6 @@ from time import sleep, time import remotespark.utils.configuration as conf from remotespark.utils.constants import Constants from remotespark.utils.log import Log -from remotespark.utils.utils import get_instance_id from .livyclienttimeouterror import LivyClientTimeoutError from .livyunexpectedstatuserror import LivyUnexpectedStatusError from .livysessionstate import LivySessionState @@ -15,9 +14,12 @@ from .livysessionstate import LivySessionState class LivySession(object): """Session that is livy specific.""" - # TODO(aggftw): make threadsafe - def __init__(self, http_client, language, session_id, sql_created): + def __init__(self, http_client, session_id, sql_created, properties): + assert "kind" in properties.keys() + kind = properties["kind"] + self.properties = properties + status_sleep_seconds = conf.status_sleep_seconds() statement_sleep_seconds = conf.statement_sleep_seconds() create_sql_context_timeout_seconds = conf.create_sql_context_timeout_seconds() @@ -30,10 +32,10 @@ class LivySession(object): self.logger = Log("LivySession") - language = language.lower() - if language not in Constants.lang_supported: - raise ValueError("Session of language '{}' not supported. Session must be of languages {}." - .format(language, ", ".join(Constants.lang_supported))) + kind = kind.lower() + if kind not in Constants.session_kinds_supported: + raise ValueError("Session of kind '{}' not supported. Session must be of kinds {}." 
+ .format(kind, ", ".join(Constants.session_kinds_supported))) if session_id == "-1": self._status = Constants.not_started_session_status @@ -41,45 +43,44 @@ class LivySession(object): else: self._status = Constants.busy_session_status + self._logs = "" self._http_client = http_client self._status_sleep_seconds = status_sleep_seconds self._statement_sleep_seconds = statement_sleep_seconds self._create_sql_context_timeout_seconds = create_sql_context_timeout_seconds self._state = LivySessionState(session_id, http_client.connection_string, - language, sql_created) + kind, sql_created) def get_state(self): return self._state def start(self): """Start the session against actual livy server.""" - # TODO(aggftw): do a pass to make all contracts variables; i.e. not peppered in code - self.logger.debug("Starting '{}' session.".format(self.language)) + self.logger.debug("Starting '{}' session.".format(self.kind)) - app_name = "remotesparkmagics_{}".format(get_instance_id()) - r = self._http_client.post("/sessions", [201], {"kind": self._get_livy_kind(), "name": app_name}) + r = self._http_client.post("/sessions", [201], self.properties) self._state.session_id = str(r.json()["id"]) self._status = str(r.json()["state"]) - self.logger.debug("Session '{}' started.".format(self.language)) + self.logger.debug("Session '{}' started.".format(self.kind)) def create_sql_context(self): """Create a sqlContext object on the session. 
Object will be accessible via variable 'sqlContext'.""" if self.started_sql_context: return - self.logger.debug("Starting '{}' sql and hive session.".format(self.language)) + self.logger.debug("Starting '{}' sql and hive session.".format(self.kind)) - self._create_context("sql") - self._create_context("hive") + self._create_context(Constants.context_name_sql) + self._create_context(Constants.context_name_hive) self._state.sql_context_created = True def _create_context(self, context_type): - if context_type == "sql": + if context_type == Constants.context_name_sql: command = self._get_sql_context_creation_command() - elif context_type == "hive": + elif context_type == Constants.context_name_hive: command = self._get_hive_context_creation_command() else: raise ValueError("Cannot create context of type {}.".format(context_type)) @@ -87,7 +88,7 @@ class LivySession(object): try: self.wait_for_idle(self._create_sql_context_timeout_seconds) self.execute(command) - self.logger.debug("Started '{}' {} session.".format(self.language, context_type)) + self.logger.debug("Started '{}' {} session.".format(self.kind, context_type)) except LivyClientTimeoutError: raise LivyClientTimeoutError("Failed to create the {} context in time. Timed out after {} seconds." 
.format(context_type, self._create_sql_context_timeout_seconds)) @@ -101,8 +102,8 @@ class LivySession(object): return self._state.sql_context_created @property - def language(self): - return self._state.language + def kind(self): + return self._state.kind def refresh_status(self): (status, logs) = self._get_latest_status_and_logs() @@ -124,7 +125,6 @@ class LivySession(object): return status in Constants.final_status def execute(self, commands): - """Executes commands in session.""" code = textwrap.dedent(commands) data = {"code": code} @@ -134,7 +134,6 @@ class LivySession(object): return self._get_statement_output(statement_id) def delete(self): - """Deletes the session and releases any resources.""" self.logger.debug("Deleting session '{}'".format(self.id)) if self._status != Constants.not_started_session_status and self._status != Constants.dead_session_status: @@ -147,7 +146,11 @@ class LivySession(object): def wait_for_idle(self, seconds_to_wait): """Wait for session to go to idle status. Sleep meanwhile. Calls done every status_sleep_seconds as - indicated by the constructor.""" + indicated by the constructor. + + Parameters: + seconds_to_wait : number of seconds to wait before giving up. 
+ """ self.refresh_status() current_status = self._status @@ -187,11 +190,11 @@ class LivySession(object): .format(self.id, len(filtered_sessions))) session = filtered_sessions[0] - return (session['state'], session['log']) + return session['state'], session['log'] def _get_statement_output(self, statement_id): statement_running = True - output = "" + out = "" while statement_running: r = self._http_client.get(self._statements_url(), [200]) statement = [i for i in r.json()["statements"] if i["id"] == statement_id][0] @@ -208,36 +211,31 @@ class LivySession(object): if statement_output["status"] == "ok": out = (True, statement_output["data"]["text/plain"]) elif statement_output["status"] == "error": - out = (False, statement_output["evalue"] + "\n" + \ - "".join(statement_output["traceback"])) - return out + out = (False, statement_output["evalue"] + "\n" + + "".join(statement_output["traceback"])) + else: + raise ValueError("Unknown output status: '{}'".format(statement_output["status"])) - def _get_livy_kind(self): - if self.language == Constants.lang_scala: - return Constants.session_kind_spark - elif self.language == Constants.lang_python: - return Constants.session_kind_pyspark - else: - raise ValueError("Cannot get session kind for {}.".format(self.language)) + return out def _get_sql_context_creation_command(self): - if self.language == Constants.lang_scala: + if self.kind == Constants.session_kind_spark: sql_context_command = "val sqlContext = new org.apache.spark.sql.SQLContext(sc)\n" \ "import sqlContext.implicits._" - elif self.language == Constants.lang_python: + elif self.kind == Constants.session_kind_pyspark: sql_context_command = "from pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\n" \ "sqlContext = SQLContext(sc)" else: - raise ValueError("Do not know how to create sqlContext in session of language {}.".format(self.language)) + raise ValueError("Do not know how to create sqlContext in session of kind {}.".format(self.kind)) return 
sql_context_command def _get_hive_context_creation_command(self): - if self.language == Constants.lang_scala: + if self.kind == Constants.session_kind_spark: hive_context_command = "val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)" - elif self.language == Constants.lang_python: + elif self.kind == Constants.session_kind_pyspark: hive_context_command = "from pyspark.sql import HiveContext\nhiveContext = HiveContext(sc)" else: - raise ValueError("Do not know how to create hiveContext in session of language {}.".format(self.language)) + raise ValueError("Do not know how to create hiveContext in session of kind {}.".format(self.kind)) return hive_context_command diff --git a/remotespark/livyclientlib/livysessionstate.py b/remotespark/livyclientlib/livysessionstate.py index 66654f1..c86dc97 100644 --- a/remotespark/livyclientlib/livysessionstate.py +++ b/remotespark/livyclientlib/livysessionstate.py @@ -3,9 +3,9 @@ class LivySessionState(object): - def __init__(self, session_id, connection_string, language, sql_context_created, version="0.0.0"): + def __init__(self, session_id, connection_string, kind, sql_context_created, version="0.0.0"): self._session_id = session_id - self._language = language + self._kind = kind self._sql_context_created = sql_context_created self._version = version self._connection_string = connection_string @@ -19,8 +19,8 @@ class LivySessionState(object): self._session_id = value @property - def language(self): - return self._language + def kind(self): + return self._kind @property def sql_context_created(self): @@ -39,5 +39,5 @@ class LivySessionState(object): return self._connection_string def to_dict(self): - return {"id": self.session_id, "language": self.language, "sqlcontext": self.sql_context_created, + return {"id": self.session_id, "kind": self.kind, "sqlcontext": self.sql_context_created, "version": self.version, "connectionstring": self.connection_string} diff --git a/remotespark/livyclientlib/sparkcontroller.py 
b/remotespark/livyclientlib/sparkcontroller.py index b3aa9d0..b5ef94a 100644 --- a/remotespark/livyclientlib/sparkcontroller.py +++ b/remotespark/livyclientlib/sparkcontroller.py @@ -38,14 +38,14 @@ class SparkController(object): def delete_session(self, name): self.client_manager.delete_client(name) - def add_session(self, name, language, connection_string, skip_if_exists): + def add_session(self, name, connection_string, skip_if_exists, properties): if skip_if_exists and (name in self.client_manager.get_sessions_list()): self.logger.debug("Skipping {} because it already exists in list of sessions.".format(name)) return - session = self.client_factory.create_session(language, connection_string, "-1", False) + session = self.client_factory.create_session(connection_string, properties, "-1", False) session.start() - livy_client = self.client_factory.build_client(language, session) + livy_client = self.client_factory.build_client(session) self.client_manager.add_client(name, livy_client) def get_client_keys(self): diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py index ee17f21..821eef8 100644 --- a/remotespark/remotesparkmagics.py +++ b/remotespark/remotesparkmagics.py @@ -7,6 +7,8 @@ Provides the %spark magic.""" from __future__ import print_function from IPython.core.magic import Magics, magics_class, line_cell_magic, needs_local_scope from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring +import json +import copy import remotespark.utils.configuration as conf from remotespark.utils.constants import Constants @@ -41,6 +43,8 @@ class RemoteSparkMagics(Magics): except KeyError: self.logger.error("Could not read env vars for serialization.") + self.properties = conf.session_configs() + self.logger.debug("Initialized spark magics.") @magic_arguments() @@ -57,7 +61,7 @@ class RemoteSparkMagics(Magics): @argument("command", type=str, default=[""], nargs="*", help="Commands to execute.") @needs_local_scope 
@line_cell_magic - def spark(self, line, cell="", local_ns={}): + def spark(self, line, cell="", local_ns=None): """Magic to execute spark remotely. This magic allows you to create a Livy Scala or Python session against a Livy endpoint. Every session can @@ -68,21 +72,28 @@ class RemoteSparkMagics(Magics): Subcommands ----------- info - Display the mode and available Livy sessions. + Display the available Livy sessions and other configurations for sessions. add Add a Livy session. First argument is the name of the session, second argument is the language, and third argument is the connection string of the Livy endpoint. A fourth argument specifying if session creation can be skipped if it already exists is optional: "skip" or empty. - e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword skip` + e.g. `%%spark add test python url=https://sparkcluster.net/livy;username=u;password=p skip` or - e.g. `%%spark add test python url=https://sparkcluster.example.net/livy;username=admin;password=MyPassword` + e.g. `%%spark add test python url=https://sparkcluster.net/livy;username=u;password=p` + config + Override the livy session properties sent to Livy on session creation. All session creations will + contain these config settings from then on. + Expected value is a JSON key-value string to be sent as part of the Request Body for the POST /sessions + endpoint in Livy. + e.g. `%%spark config {"driverMemory":"1000M", "executorCores":4}` run Run Spark code against a session. e.g. `%%spark -e testsession` will execute the cell code against the testsession previously created e.g. `%%spark -e testsession -c sql` will execute the SQL code against the testsession previously created - e.g. `%%spark -e testsession -c sql -o my_var` will execute the SQL code against the testsession previously - created and store the pandas dataframe created in the my_var variable in the Python environment + e.g. 
`%%spark -e testsession -c sql -o my_var` will execute the SQL code against the testsession + previously created and store the pandas dataframe created in the my_var variable in the + Python environment. delete Delete a Livy session. Argument is the name of the session to be deleted. e.g. `%%spark delete defaultlivy` @@ -99,18 +110,29 @@ class RemoteSparkMagics(Magics): # info if subcommand == "info": self._print_info() + # config + elif subcommand == "config": + # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes... + rest_of_line = user_input[7:] + self.properties = json.loads(rest_of_line) # add elif subcommand == "add": if len(args.command) != 4 and len(args.command) != 5: raise ValueError("Subcommand 'add' requires three or four arguments. {}".format(usage)) + name = args.command[1].lower() - language = args.command[2] + language = args.command[2].lower() connection_string = args.command[3] + if len(args.command) == 5: skip = args.command[4].lower() == "skip" else: skip = False - self.spark_controller.add_session(name, language, connection_string, skip) + + properties = copy.deepcopy(self.properties) + properties["kind"] = self._get_livy_kind(language) + + self.spark_controller.add_session(name, connection_string, skip, properties) # delete elif subcommand == "delete": if len(args.command) != 2: @@ -151,7 +173,21 @@ class RemoteSparkMagics(Magics): return None def _print_info(self): - print("Info for running Spark:\n\t{}\n".format(self.spark_controller.get_client_keys())) + print("""Info for running Spark: + Sessions: + {} + Session configs: + {} +""".format(self.spark_controller.get_client_keys(), self.properties)) + + @staticmethod + def _get_livy_kind(language): + if language == Constants.lang_scala: + return Constants.session_kind_spark + elif language == Constants.lang_python: + return Constants.session_kind_pyspark + else: + raise ValueError("Cannot get session kind for {}.".format(language)) def load_ipython_extension(ip): 
diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py index cdef740..df09e52 100644 --- a/remotespark/utils/configuration.py +++ b/remotespark/utils/configuration.py @@ -68,6 +68,10 @@ def _override(f): # value of that configuration if there is any such configuration. Otherwise, # these functions return the default values described in their bodies. +@_override +def session_configs(): + return {} + @_override def serialize(): return False diff --git a/remotespark/utils/constants.py b/remotespark/utils/constants.py index 197bd65..6d6a1cc 100644 --- a/remotespark/utils/constants.py +++ b/remotespark/utils/constants.py @@ -7,6 +7,7 @@ class Constants: session_kind_spark = "spark" session_kind_pyspark = "pyspark" + session_kinds_supported = [session_kind_spark, session_kind_pyspark] context_name_spark = "spark" context_name_sql = "sql"
Allow user to specify memory/cores/etc for every session
jupyter-incubator/sparkmagic
diff --git a/tests/test_clientmanagerstateserializer.py b/tests/test_clientmanagerstateserializer.py index 4310058..4ed8ed7 100644 --- a/tests/test_clientmanagerstateserializer.py +++ b/tests/test_clientmanagerstateserializer.py @@ -27,7 +27,7 @@ def test_deserialize_not_emtpy(): "name": "py", "id": "1", "sqlcontext": true, - "language": "python", + "kind": "pyspark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0" }, @@ -35,7 +35,7 @@ def test_deserialize_not_emtpy(): "name": "sc", "id": "2", "sqlcontext": false, - "language": "scala", + "kind": "spark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0" } @@ -50,17 +50,15 @@ def test_deserialize_not_emtpy(): (name, client) = deserialized[0] assert name == "py" - client_factory.create_session.assert_any_call("python", - "url=https://mysite.com/livy;username=user;password=pass", - "1", True) - client_factory.build_client.assert_any_call("python", session) + client_factory.create_session.assert_any_call("url=https://mysite.com/livy;username=user;password=pass", + "1", True, {"kind":"pyspark"}) + client_factory.build_client.assert_any_call(session) (name, client) = deserialized[1] assert name == "sc" - client_factory.create_session.assert_any_call("scala", - "url=https://mysite.com/livy;username=user;password=pass", - "2", False) - client_factory.build_client.assert_any_call("scala", session) + client_factory.create_session.assert_any_call("url=https://mysite.com/livy;username=user;password=pass", + "2", False, {"kind":"spark"}) + client_factory.build_client.assert_any_call(session) def test_deserialize_not_emtpy_but_dead(): @@ -75,7 +73,7 @@ def test_deserialize_not_emtpy_but_dead(): "name": "py", "id": "1", "sqlcontext": true, - "language": "python", + "kind": "pyspark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0" }, @@ -83,7 +81,7 @@ def 
test_deserialize_not_emtpy_but_dead(): "name": "sc", "id": "2", "sqlcontext": false, - "language": "scala", + "kind": "spark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0" } @@ -113,7 +111,7 @@ def test_deserialize_not_emtpy_but_error(): "name": "py", "id": "1", "sqlcontext": true, - "language": "python", + "kind": "pyspark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0" }, @@ -121,7 +119,7 @@ def test_deserialize_not_emtpy_but_error(): "name": "sc", "id": "2", "sqlcontext": false, - "language": "scala", + "kind": "spark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0" } @@ -155,11 +153,11 @@ def test_serialize_not_empty(): client_factory = MagicMock() reader_writer = MagicMock() client1 = MagicMock() - client1.serialize.return_value = {"id": "1", "sqlcontext": True, "language": "python", + client1.serialize.return_value = {"id": "1", "sqlcontext": True, "kind": "pyspark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0"} client2 = MagicMock() - client2.serialize.return_value = {"id": "2", "sqlcontext": False, "language": "scala", + client2.serialize.return_value = {"id": "2", "sqlcontext": False, "kind": "spark", "connectionstring": "url=https://mysite.com/livy;username=user;password=pass", "version": "0.0.0"} serializer = ClientManagerStateSerializer(client_factory, reader_writer) @@ -169,9 +167,9 @@ def test_serialize_not_empty(): # Verify write was called with following string expected_str = '{"clients": [{"name": "py", "connectionstring": "url=https://mysite.com/livy;username=user;p' \ - 'assword=pass", "version": "0.0.0", "language": "python", "sqlcontext": true, "id": "1"}, {"n' \ + 'assword=pass", "version": "0.0.0", "kind": "pyspark", "sqlcontext": true, "id": "1"}, {"n' \ 'ame": "sc", "connectionstring": 
"url=https://mysite.com/livy;username=user;password=pass", "ve' \ - 'rsion": "0.0.0", "language": "scala", "sqlcontext": false, "id": "2"}]}' + 'rsion": "0.0.0", "kind": "spark", "sqlcontext": false, "id": "2"}]}' expected_dict = json.loads(expected_str) call_list = reader_writer.overwrite_with_line.call_args_list assert len(call_list) == 1 diff --git a/tests/test_livyclient.py b/tests/test_livyclient.py index a47e023..57870fa 100644 --- a/tests/test_livyclient.py +++ b/tests/test_livyclient.py @@ -3,6 +3,7 @@ from remotespark.livyclientlib.livyclient import LivyClient from remotespark.livyclientlib.livysessionstate import LivySessionState from remotespark.utils.utils import get_connection_string +from remotespark.utils.constants import Constants def test_create_sql_context_automatically(): @@ -55,7 +56,7 @@ def test_serialize(): connection_string = get_connection_string(url, username, password) http_client = MagicMock() http_client.connection_string = connection_string - kind = "scala" + kind = Constants.session_kind_spark session_id = "-1" sql_created = False session = MagicMock() @@ -67,7 +68,7 @@ def test_serialize(): assert serialized["connectionstring"] == connection_string assert serialized["id"] == "-1" - assert serialized["language"] == kind + assert serialized["kind"] == kind assert serialized["sqlcontext"] == sql_created assert serialized["version"] == "0.0.0" assert len(serialized.keys()) == 5 @@ -82,16 +83,16 @@ def test_close_session(): mock_spark_session.delete.assert_called_once_with() -def test_language(): - lang = "python" +def test_kind(): + kind = "pyspark" mock_spark_session = MagicMock() - language_mock = PropertyMock(return_value=lang) - type(mock_spark_session).language = language_mock + language_mock = PropertyMock(return_value=kind) + type(mock_spark_session).kind = language_mock client = LivyClient(mock_spark_session) - l = client.language + l = client.kind - assert l == lang + assert l == kind def test_session_id(): diff --git 
a/tests/test_livyclientfactory.py b/tests/test_livyclientfactory.py index 1e2e4f2..643b103 100644 --- a/tests/test_livyclientfactory.py +++ b/tests/test_livyclientfactory.py @@ -2,6 +2,8 @@ from mock import MagicMock from nose.tools import raises from remotespark.livyclientlib.livyclientfactory import LivyClientFactory +from remotespark.livyclientlib.pandaspysparklivyclient import PandasPysparkLivyClient +from remotespark.livyclientlib.pandasscalalivyclient import PandasScalaLivyClient from remotespark.utils.constants import Constants from remotespark.utils.utils import get_connection_string @@ -9,36 +11,58 @@ from remotespark.utils.utils import get_connection_string def test_build_session_with_defaults(): factory = LivyClientFactory() connection_string = get_connection_string("url", "user", "pass") - language = "python" + kind = Constants.session_kind_pyspark + properties = {"kind": kind} - session = factory.create_session(language, connection_string) + session = factory.create_session(connection_string, properties) - assert session.language == language + assert session.kind == kind assert session.id == "-1" assert session.started_sql_context is False + assert session.properties == properties def test_build_session(): factory = LivyClientFactory() connection_string = get_connection_string("url", "user", "pass") - language = "python" + kind = Constants.session_kind_pyspark + properties = {"kind": kind} - session = factory.create_session(language, connection_string, "1", True) + session = factory.create_session(connection_string, properties, "1", True) - assert session.language == language + assert session.kind == kind assert session.id == "1" assert session.started_sql_context + assert session.properties == properties def test_can_build_all_clients(): - session = MagicMock() factory = LivyClientFactory() - for language in Constants.lang_supported: - factory.build_client(language, session) + for kind in Constants.session_kinds_supported: + session = MagicMock() + 
session.kind = kind + factory.build_client(session) @raises(ValueError) def test_build_unknown_language(): session = MagicMock() + session.kind = "unknown" + factory = LivyClientFactory() + factory.build_client(session) + + +def test_build_pyspark(): + session = MagicMock() + session.kind = Constants.session_kind_pyspark + factory = LivyClientFactory() + client = factory.build_client(session) + assert isinstance(client, PandasPysparkLivyClient) + + +def test_build_spark(): + session = MagicMock() + session.kind = Constants.session_kind_spark factory = LivyClientFactory() - factory.build_client("unknown", session) + client = factory.build_client(session) + assert isinstance(client, PandasScalaLivyClient) diff --git a/tests/test_livysession.py b/tests/test_livysession.py index 0090e1b..6ee3d1d 100644 --- a/tests/test_livysession.py +++ b/tests/test_livysession.py @@ -7,7 +7,8 @@ from remotespark.livyclientlib.livyclienttimeouterror import LivyClientTimeoutEr from remotespark.livyclientlib.livyunexpectedstatuserror import LivyUnexpectedStatusError from remotespark.livyclientlib.livysession import LivySession import remotespark.utils.configuration as conf -from remotespark.utils.utils import get_connection_string, get_instance_id +from remotespark.utils.utils import get_connection_string +from remotespark.utils.constants import Constants class DummyResponse: @@ -23,41 +24,22 @@ class DummyResponse: class TestLivySession: - pi_result = "Pi is roughly 3.14336" - - session_create_json = '{"id":0,"state":"starting","kind":"spark","log":[]}' - ready_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"idle","kind":"spark","log":["16:23:01,15' \ - '1 |-INFO in ch.qos.logback.core.joran.action.AppenderAction - Naming appender as [STDOUT]' \ - '","16:23:01,213 |-INFO in ch.qos.logback.core.joran.action.NestedComplexPropertyIA - As' \ - 'suming default type [ch.qos.logback.access.PatternLayoutEncoder] for [encoder] propert' \ - 'y","16:23:01,368 |-INFO in 
ch.qos.logback.core.joran.action.AppenderRefAction - Attachin' \ - 'g appender named [STDOUT] to null","16:23:01,368 |-INFO in ch.qos.logback.access.joran.act' \ - 'ion.ConfigurationAction - End of configuration.","16:23:01,371 |-INFO in ch.qos.logback.ac' \ - 'cess.joran.JoranConfigurator@53799e55 - Registering current configuration as safe fallback' \ - ' point","","15/09/04 16:23:01 INFO server.ServerConnector: Started ServerConnector@388859' \ - 'e4{HTTP/1.1}{0.0.0.0:37394}","15/09/04 16:23:01 INFO server.Server: Started @27514ms","' \ - '15/09/04 16:23:01 INFO livy.WebServer: Starting server on 37394","Starting livy-repl on' \ - ' http://10.0.0.11:37394"]}]}' - error_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"error","kind":"spark","log":[]}]}' - busy_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"busy","kind":"spark","log":["16:23:01,151' \ - ' |-INFO in ch.qos.logback.core.joran.action.AppenderAction - Naming appender as [STDOUT]",' \ - '"16:23:01,213 |-INFO in ch.qos.logback.core.joran.action.NestedComplexPropertyIA - Assumin' \ - 'g default type [ch.qos.logback.access.PatternLayoutEncoder] for [encoder] property","16:23' \ - ':01,368 |-INFO in ch.qos.logback.core.joran.action.AppenderRefAction - Attaching appender ' \ - 'named [STDOUT] to null","16:23:01,368 |-INFO in ch.qos.logback.access.joran.action.Configu' \ - 'rationAction - End of configuration.","16:23:01,371 |-INFO in ch.qos.logback.access.joran.' 
\ - 'JoranConfigurator@53799e55 - Registering current configuration as safe fallback point","",' \ - '"15/09/04 16:23:01 INFO server.ServerConnector: Started ServerConnector@388859e4{HTTP/1.1}' \ - '{0.0.0.0:37394}","15/09/04 16:23:01 INFO server.Server: Started @27514ms","15/09/04 16:23:' \ - '01 INFO livy.WebServer: Starting server on 37394","Starting livy-repl on http://10.0.0.11:' \ - '37394"]}]}' - post_statement_json = '{"id":0,"state":"running","output":null}' - running_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"running","output":null}]}' - ready_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"available","output":{"status":"ok",' \ - '"execution_count":0,"data":{"text/plain":"Pi is roughly 3.14336"}}}]}' - - get_responses = [] - post_responses = [] + + def __init__(self): + self.pi_result = "Pi is roughly 3.14336" + + self.session_create_json = '{"id":0,"state":"starting","kind":"spark","log":[]}' + self.ready_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"idle","kind":"spark","log":[""]}]}' + self.error_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"error","kind":"spark","log":' \ + '[""]}]}' + self.busy_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"busy","kind":"spark","log":[""]}]}' + self.post_statement_json = '{"id":0,"state":"running","output":null}' + self.running_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"running","output":null}]}' + self.ready_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"available","output":{"statu' \ + 's":"ok","execution_count":0,"data":{"text/plain":"Pi is roughly 3.14336"}}}]}' + + self.get_responses = [] + self.post_responses = [] def _next_response_get(self, *args): val = self.get_responses[0] @@ -69,103 +51,83 @@ class TestLivySession: self.post_responses = self.post_responses[1:] return val + def _create_session(self, kind=Constants.session_kind_spark, 
session_id="-1", sql_created=False, http_client=None): + if http_client is None: + http_client = MagicMock() + + return LivySession(http_client, session_id, sql_created, {"kind": kind}) + @raises(AssertionError) def test_constructor_throws_status_sleep_seconds(self): - kind = "scala" - http_client = MagicMock() - session_id = "-1" - sql_created = False conf.override({ "status_sleep_seconds": 0, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 }) - LivySession(http_client, kind, session_id, sql_created) + self._create_session() conf.load() @raises(AssertionError) def test_constructor_throws_statement_sleep_seconds(self): - kind = "scala" - http_client = MagicMock() - session_id = "-1" - sql_created = False conf.override({ "status_sleep_seconds": 3, "statement_sleep_seconds": 0, "create_sql_context_timeout_seconds": 60 }) - LivySession(http_client, kind, session_id, sql_created) + self._create_session() conf.load({}) @raises(AssertionError) def test_constructor_throws_sql_create_timeout_seconds(self): - kind = "scala" - http_client = MagicMock() - session_id = "-1" - sql_created = False conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 0 }) - LivySession(http_client, kind, session_id, sql_created) + self._create_session() conf.load() @raises(ValueError) def test_constructor_throws_invalid_session_sql_combo(self): - kind = "scala" - http_client = MagicMock() - session_id = "-1" - sql_created = True conf.override({ "status_sleep_seconds": 2, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 }) - LivySession(http_client, kind, session_id, sql_created) + self._create_session(sql_created=True) conf.load() def test_constructor_starts_with_existing_session(self): - kind = "scala" - http_client = MagicMock() - session_id = "1" - sql_created = True conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 }) - session 
= LivySession(http_client, kind, session_id, sql_created) + session_id = "1" + session = self._create_session(session_id=session_id, sql_created=True) conf.load() - assert session.id == "1" + assert session.id == session_id assert session.started_sql_context def test_constructor_starts_with_no_session(self): - kind = "scala" - http_client = MagicMock() - session_id = "-1" - sql_created = False conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 }) - session = LivySession(http_client, kind, session_id, sql_created) + session = self._create_session() conf.load() assert session.id == "-1" assert not session.started_sql_context def test_is_final_status(self): - kind = "scala" - http_client = MagicMock() - conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + session = self._create_session() conf.load() assert not session.is_final_status("idle") @@ -176,7 +138,6 @@ class TestLivySession: assert session.is_final_status("error") def test_start_scala_starts_session(self): - kind = "scala" http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) @@ -184,18 +145,18 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + kind = Constants.session_kind_spark + session = self._create_session(kind=kind, http_client=http_client) session.start() conf.load() - assert_equals(kind, session.language) + assert_equals(kind, session.kind) assert_equals("starting", session._status) assert_equals("0", session.id) http_client.post.assert_called_with( - "/sessions", [201], {"kind": "spark", "name": "remotesparkmagics_{}".format(get_instance_id())}) + "/sessions", [201], {"kind": "spark"}) def test_start_python_starts_session(self): - kind = "python" http_client = MagicMock() http_client.post.return_value = 
DummyResponse(201, self.session_create_json) @@ -203,15 +164,33 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + kind = Constants.session_kind_pyspark + session = self._create_session(kind=kind, http_client=http_client) session.start() conf.load() - assert_equals(kind, session.language) + assert_equals(kind, session.kind) assert_equals("starting", session._status) assert_equals("0", session.id) http_client.post.assert_called_with( - "/sessions", [201],{"kind": "pyspark", "name": "remotesparkmagics_{}".format(get_instance_id())}) + "/sessions", [201], {"kind": "pyspark"}) + + def test_start_passes_in_all_properties(self): + http_client = MagicMock() + http_client.post.return_value = DummyResponse(201, self.session_create_json) + + conf.override({ + "status_sleep_seconds": 0.01, + "statement_sleep_seconds": 0.01 + }) + kind = Constants.session_kind_spark + properties = {"kind": kind, "extra": 1} + session = LivySession(http_client, "-1", False, properties) + session.start() + conf.load() + + http_client.post.assert_called_with( + "/sessions", [201], properties) def test_status_gets_latest(self): http_client = MagicMock() @@ -221,7 +200,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.load() session.start() @@ -242,7 +221,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.override({}) session.start() @@ -265,7 +244,7 @@ class TestLivySession: "status_sleep_seconds": 0.011, "statement_sleep_seconds": 6000 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.load() 
session.start() @@ -285,7 +264,7 @@ class TestLivySession: "status_sleep_seconds": 0.011, "statement_sleep_seconds": 6000 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.load() session.start() @@ -299,7 +278,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.load() session.start() @@ -315,7 +294,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.load() session.delete() @@ -331,14 +310,14 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, "scala", "-1", False) + session = self._create_session(http_client=http_client) conf.load() session._status = "dead" session.delete() def test_execute(self): - kind = "scala" + kind = Constants.session_kind_spark http_client = MagicMock() self.post_responses = [DummyResponse(201, self.session_create_json), DummyResponse(201, self.post_statement_json)] @@ -350,7 +329,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + session = self._create_session(kind=kind, http_client=http_client) conf.load() session.start() command = "command" @@ -364,7 +343,7 @@ class TestLivySession: assert_equals(self.pi_result, result[1]) def test_create_sql_hive_context_happens_once(self): - kind = "scala" + kind = Constants.session_kind_spark http_client = MagicMock() self.post_responses = [DummyResponse(201, self.session_create_json), DummyResponse(201, self.post_statement_json), @@ -380,7 +359,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) 
- session = LivySession(http_client, kind, "-1", False) + session = self._create_session(kind=kind, http_client=http_client) conf.load() session.start() @@ -396,12 +375,11 @@ class TestLivySession: "(sc)\nimport sqlContext.implicits._"}) \ in http_client.post.call_args_list assert call("/sessions/0/statements", [201], {"code": "val hiveContext = new org.apache.spark.sql.hive.Hive" - "Context(sc)"}) \ - in http_client.post.call_args_list + "Context(sc)"}) in http_client.post.call_args_list assert len(http_client.post.call_args_list) == 2 def test_create_sql_context_spark(self): - kind = "scala" + kind = Constants.session_kind_spark http_client = MagicMock() self.post_responses = [DummyResponse(201, self.session_create_json), DummyResponse(201, self.post_statement_json), @@ -417,7 +395,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + session = self._create_session(kind=kind, http_client=http_client) conf.load() session.start() @@ -427,12 +405,10 @@ class TestLivySession: "(sc)\nimport sqlContext.implicits._"}) \ in http_client.post.call_args_list assert call("/sessions/0/statements", [201], {"code": "val hiveContext = new org.apache.spark.sql.hive.Hive" - "Context(sc)"}) \ - in http_client.post.call_args_list - + "Context(sc)"}) in http_client.post.call_args_list def test_create_sql_hive_context_pyspark(self): - kind = "python" + kind = Constants.session_kind_pyspark http_client = MagicMock() self.post_responses = [DummyResponse(201, self.session_create_json), DummyResponse(201, self.post_statement_json), @@ -448,16 +424,15 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + session = self._create_session(kind=kind, http_client=http_client) conf.load() session.start() session.create_sql_context() - assert call("/sessions/0/statements", [201], {"code": "from pyspark.sql 
import SQLContext\n" - "from pyspark.sql.types import *\n" - "sqlContext = SQLContext(sc)"}) \ - in http_client.post.call_args_list + assert call("/sessions/0/statements", [201], {"code": "from pyspark.sql import SQLContext\nfrom pyspark." + "sql.types import *\nsqlContext = SQLContext(" + "sc)"}) in http_client.post.call_args_list assert call("/sessions/0/statements", [201], {"code": "from pyspark.sql import HiveContext\n" "hiveContext = HiveContext(sc)"}) \ in http_client.post.call_args_list @@ -477,7 +452,7 @@ class TestLivySession: "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + session = self._create_session(kind=kind, http_client=http_client) conf.load() session.start() @@ -490,19 +465,19 @@ class TestLivySession: connection_string = get_connection_string(url, username, password) http_client = MagicMock() http_client.connection_string = connection_string - kind = "scala" + kind = Constants.session_kind_spark conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) - session = LivySession(http_client, kind, "-1", False) + session = self._create_session(kind=kind, http_client=http_client) conf.load() serialized = session.get_state().to_dict() assert serialized["connectionstring"] == connection_string assert serialized["id"] == "-1" - assert serialized["language"] == kind + assert serialized["kind"] == kind assert serialized["sqlcontext"] == False assert serialized["version"] == "0.0.0" assert len(serialized.keys()) == 5 diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py index e5f6058..8afdc90 100644 --- a/tests/test_remotesparkmagics.py +++ b/tests/test_remotesparkmagics.py @@ -3,6 +3,7 @@ from nose.tools import raises, with_setup from remotespark.remotesparkmagics import RemoteSparkMagics from remotespark.livyclientlib.dataframeparseexception import DataFrameParseException +import remotespark.utils.configuration as conf magic = None @@ 
-13,6 +14,8 @@ shell = None def _setup(): global magic, spark_controller, shell + conf.override({}) + shell = MagicMock() magic = RemoteSparkMagics(shell=None) magic.shell = shell @@ -38,7 +41,7 @@ def test_info_command_parses(): @with_setup(_setup, _teardown) def test_add_sessions_command_parses(): - # Do not skip + # Do not skip and python add_sessions_mock = MagicMock() spark_controller.add_session = add_sessions_mock command = "add" @@ -49,20 +52,38 @@ def test_add_sessions_command_parses(): magic.spark(line) - add_sessions_mock.assert_called_once_with(name, language, connection_string, False) + add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "pyspark"}) - # Skip + # Skip and scala - upper case add_sessions_mock = MagicMock() spark_controller.add_session = add_sessions_mock command = "add" name = "name" - language = "python" + language = "Scala" connection_string = "url=http://location:port;username=name;password=word" line = " ".join([command, name, language, connection_string, "skip"]) magic.spark(line) - add_sessions_mock.assert_called_once_with(name, language, connection_string, True) + add_sessions_mock.assert_called_once_with(name, connection_string, True, {"kind": "spark"}) + + +@with_setup(_setup, _teardown) +def test_add_sessions_command_extra_properties(): + magic.spark("config {\"extra\": \"yes\"}") + assert magic.properties == {"extra": "yes"} + + add_sessions_mock = MagicMock() + spark_controller.add_session = add_sessions_mock + command = "add" + name = "name" + language = "scala" + connection_string = "url=http://location:port;username=name;password=word" + line = " ".join([command, name, language, connection_string]) + + magic.spark(line) + + add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "spark", "extra": "yes"}) @with_setup(_setup, _teardown) diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py index fcd1ff4..8d886fc 100644 --- 
a/tests/test_sparkcontroller.py +++ b/tests/test_sparkcontroller.py @@ -25,17 +25,17 @@ def _teardown(): @with_setup(_setup, _teardown) def test_add_session(): name = "name" - language = "python" + properties = {"kind": "spark"} connection_string = "url=http://location:port;username=name;password=word" client = "client" session = MagicMock() client_factory.create_session = MagicMock(return_value=session) client_factory.build_client = MagicMock(return_value=client) - controller.add_session(name, language, connection_string, False) + controller.add_session(name, connection_string, False, properties) - client_factory.create_session.assert_called_once_with(language, connection_string, "-1", False) - client_factory.build_client.assert_called_once_with(language, session) + client_factory.create_session.assert_called_once_with(connection_string, properties, "-1", False) + client_factory.build_client.assert_called_once_with(session) client_manager.add_client.assert_called_once_with(name, client) session.start.assert_called_once_with()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 10 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "mkdir ~/.sparkmagic", "cp remotespark/default_config.json ~/.sparkmagic/config.json" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 narwhals==1.32.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==6.0.1 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@3c0230b30b1f63780fbf2d4ba9d83e0a83f51eea#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - narwhals==1.32.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==6.0.1 - pluggy==1.5.0 - 
prometheus-client==0.21.1 - ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_clientmanagerstateserializer.py::test_deserialize_not_emtpy", "tests/test_livyclient.py::test_serialize", "tests/test_livyclient.py::test_kind", "tests/test_livyclientfactory.py::test_build_session_with_defaults", "tests/test_livyclientfactory.py::test_build_session", "tests/test_livyclientfactory.py::test_can_build_all_clients", "tests/test_livyclientfactory.py::test_build_unknown_language", "tests/test_livyclientfactory.py::test_build_pyspark", "tests/test_livyclientfactory.py::test_build_spark" ]
[ "tests/test_clientmanagerstateserializer.py::test_deserialize_not_emtpy_but_dead", "tests/test_clientmanagerstateserializer.py::test_deserialize_not_emtpy_but_error", "tests/test_clientmanagerstateserializer.py::test_deserialize_empty", "tests/test_remotesparkmagics.py::test_info_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties", "tests/test_remotesparkmagics.py::test_delete_sessions_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_command_parses", "tests/test_remotesparkmagics.py::test_bad_command_throws_exception", "tests/test_remotesparkmagics.py::test_run_cell_command_parses", "tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err", "tests/test_remotesparkmagics.py::test_run_sql_command_parses", "tests/test_remotesparkmagics.py::test_run_hive_command_parses", "tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns", "tests/test_sparkcontroller.py::test_add_session", "tests/test_sparkcontroller.py::test_add_session_skip", "tests/test_sparkcontroller.py::test_delete_session", "tests/test_sparkcontroller.py::test_cleanup", "tests/test_sparkcontroller.py::test_run_cell", "tests/test_sparkcontroller.py::test_get_client_keys" ]
[ "tests/test_clientmanagerstateserializer.py::test_serializer_throws_none_path", "tests/test_clientmanagerstateserializer.py::test_serializer_throws_none_factory", "tests/test_clientmanagerstateserializer.py::test_serialize_not_empty", "tests/test_livyclient.py::test_create_sql_context_automatically", "tests/test_livyclient.py::test_execute_code", "tests/test_livyclient.py::test_execute_sql", "tests/test_livyclient.py::test_execute_hive", "tests/test_livyclient.py::test_close_session", "tests/test_livyclient.py::test_session_id" ]
[]
Modified BSD License
343
mogproject__color-ssh-8
3c6ef87beb0faf48b0af7f4498b1be5ff34e6fe1
2015-12-20 18:03:15
cdcbb8980f7a4e49797192dc089915d702322460
diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py index d18f409..b794fd4 100644 --- a/src/color_ssh/__init__.py +++ b/src/color_ssh/__init__.py @@ -1,1 +1,1 @@ -__version__ = '0.0.2' +__version__ = '0.1.0' diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py index 4c278c2..20f7651 100644 --- a/src/color_ssh/color_ssh.py +++ b/src/color_ssh/color_ssh.py @@ -1,71 +1,135 @@ from __future__ import division, print_function, absolute_import, unicode_literals import sys +import io import shlex import subprocess from optparse import OptionParser from color_ssh.util.util import * +from multiprocessing.pool import Pool __all__ = [] class Setting(object): VERSION = 'color-ssh %s' % __import__('color_ssh').__version__ - USAGE = """%prog [options...] [user@]hostname command""" + USAGE = '\n'.join([ + '%prog [options...] [user@]hostname command', + ' %prog [options...] -h host_file command', + ' %prog [options...] -H "[user@]hostname [[user@]hostname]...]" command' + ]) + DEFAULT_PARALLELISM = 32 - def __init__(self, label=None, command=None): - self.label = label - self.command = command + def __init__(self, parallelism=None, tasks=None): + self.parallelism = parallelism + self.tasks = tasks def parse_args(self, argv, stdout=io2bytes(sys.stdout)): """ :param argv: list of str :param stdout: binary-data stdout output """ - parser = OptionParser(version=self.VERSION, usage=self.USAGE) + parser = OptionParser(version=self.VERSION, usage=self.USAGE, conflict_handler='resolve') parser.allow_interspersed_args = False parser.add_option( '-l', '--label', dest='label', default=None, type='string', metavar='LABEL', - help='set label name to LABEL' + help='label name' ) parser.add_option( '--ssh', dest='ssh', default=str('ssh'), type='string', metavar='SSH', - help='override ssh command line string to SSH' + help='override ssh command line string' + ) + parser.add_option( + '-h', '--hosts', dest='host_file', default=None, type='string', 
metavar='HOST_FILE', + help='hosts file (each line "[user@]host")' + ) + parser.add_option( + '-H', '--host', dest='host_string', default=None, type='string', metavar='HOST_STRING', + help='additional host entries ("[user@]host")' + ) + parser.add_option( + '-p', '--par', dest='parallelism', default=self.DEFAULT_PARALLELISM, type='int', metavar='PAR', + help='max number of parallel threads (default: %d)' % self.DEFAULT_PARALLELISM ) option, args = parser.parse_args(argv[1:]) + hosts = self._load_hosts(option.host_file) + (option.host_string.split() if option.host_string else []) - if len(args) < 2: + if len(args) < (1 if hosts else 2): stdout.write(arg2bytes(parser.format_help().encode('utf-8'))) parser.exit(2) - self.label = option.label or args[0].rsplit('@', 1)[-1] - self.command = shlex.split(option.ssh) + args + prefix = shlex.split(option.ssh) + + if not hosts: + hosts = args[:1] + command = args[1:] + else: + command = args + + tasks = [(option.label or self._extract_label(host), prefix + [host] + command) for host in hosts] + + self.parallelism = option.parallelism + self.tasks = tasks return self + @staticmethod + def _load_hosts(path): + if not path: + return [] + + with io.open(path) as f: + lines = f.readlines() + return list(filter(lambda x: x, (line.strip() for line in lines))) + + @staticmethod + def _extract_label(host): + return host.rsplit('@', 1)[-1] -def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)): - """ - Main function - """ - setting = Setting().parse_args(argv) - prefix = ['color-cat', '-l', setting.label] + +def run_task(args): + label, command = args + + # We don't pass stdout/stderr file descriptors since this function runs in the forked processes. 
+ stdout = io2bytes(sys.stdout) + stderr = io2bytes(sys.stderr) + + prefix = ['color-cat', '-l', label] try: proc_stdout = subprocess.Popen(prefix, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr) proc_stderr = subprocess.Popen(prefix + ['-s', '+'], stdin=subprocess.PIPE, stdout=stderr, stderr=stderr) - ret = subprocess.call(setting.command, stdin=stdin, stdout=proc_stdout.stdin, stderr=proc_stderr.stdin) + ret = subprocess.call(command, stdin=None, stdout=proc_stdout.stdin, stderr=proc_stderr.stdin) proc_stdout.stdin.close() proc_stderr.stdin.close() proc_stdout.wait() proc_stderr.wait() + except Exception as e: + msg = '%s: %s\nlabel=%s, command=%s\n' % (e.__class__.__name__, e, label, command) + stderr.write(msg.encode('utf-8', 'ignore')) + return 1 + return ret + +def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)): + """ + Main function + """ + + try: + setting = Setting().parse_args(argv, stdout) + n = min(len(setting.tasks), setting.parallelism) + if n <= 1: + ret = map(run_task, setting.tasks) + else: + pool = Pool(n) + ret = pool.map(run_task, setting.tasks) except Exception as e: - msg = '%s: %s\nCommand: %s\n' % (e.__class__.__name__, e, setting.command) + msg = '%s: %s\n' % (e.__class__.__name__, e) stderr.write(msg.encode('utf-8', 'ignore')) return 1 - return ret + return max(ret) diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py index 55d27c2..9273954 100644 --- a/src/color_ssh/util/util.py +++ b/src/color_ssh/util/util.py @@ -13,4 +13,4 @@ def arg2bytes(arg): def io2bytes(fd): - return fd.buffer if PY3 else fd + return fd.buffer if hasattr(fd, 'buffer') else fd
Load host list from file - `--host` - with parallelism option - cf. pssh (parallel-ssh)
mogproject/color-ssh
diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py index ea82202..b5c4ba7 100644 --- a/tests/color_ssh/test_color_ssh.py +++ b/tests/color_ssh/test_color_ssh.py @@ -1,11 +1,11 @@ # encoding: utf-8 from __future__ import division, print_function, absolute_import, unicode_literals -import os -import io import sys +import os import tempfile import six +from contextlib import contextmanager from mog_commons.unittest import TestCase from color_ssh import color_ssh from color_ssh.color_ssh import Setting @@ -13,9 +13,8 @@ from color_ssh.util.util import PY3 class TestSetting(TestCase): - def _check(self, setting, expected): - self.assertEqual(setting.label, expected.label) - self.assertEqual(setting.command, expected.command) + def _check(self, setting, tasks): + self.assertEqual(setting.tasks, tasks) def _parse(self, args): xs = [] @@ -30,35 +29,76 @@ class TestSetting(TestCase): def test_parse_args(self): self._check(self._parse(['server-1', 'pwd']), - Setting('server-1', ['ssh', 'server-1', 'pwd'])) + [('server-1', ['ssh', 'server-1', 'pwd'])]) self._check(self._parse(['user@server-1', 'ls', '-l']), - Setting('server-1', ['ssh', 'user@server-1', 'ls', '-l'])) + [('server-1', ['ssh', 'user@server-1', 'ls', '-l'])]) + + # label self._check(self._parse(['-l', 'label', 'user@server-1', 'ls', '-l']), - Setting('label', ['ssh', 'user@server-1', 'ls', '-l'])) + [('label', ['ssh', 'user@server-1', 'ls', '-l'])]) self._check(self._parse(['--label', 'label', 'user@server-1', 'ls', '-l']), - Setting('label', ['ssh', 'user@server-1', 'ls', '-l'])) + [('label', ['ssh', 'user@server-1', 'ls', '-l'])]) self._check(self._parse(['-llabel', 'user@server-1', 'ls', '-l']), - Setting('label', ['ssh', 'user@server-1', 'ls', '-l'])) + [('label', ['ssh', 'user@server-1', 'ls', '-l'])]) self._check(self._parse(['--label', 'label', '--ssh', '/usr/bin/ssh -v', 'user@server-1', 'ls', '-l']), - Setting('label', ['/usr/bin/ssh', '-v', 'user@server-1', 'ls', '-l'])) 
+ [('label', ['/usr/bin/ssh', '-v', 'user@server-1', 'ls', '-l'])]) + + # ssh self._check(self._parse(['--ssh', '/usr/bin/ssh -v --option "a b c"', 'user@server-1', 'ls', '-l']), - Setting('server-1', ['/usr/bin/ssh', '-v', '--option', 'a b c', 'user@server-1', 'ls', '-l'])) + [('server-1', ['/usr/bin/ssh', '-v', '--option', 'a b c', 'user@server-1', 'ls', '-l'])]) self._check(self._parse(['--label', 'あいう'.encode('utf-8'), 'user@server-1', 'ls', '-l']), - Setting('あいう' if PY3 else 'あいう'.encode('utf-8'), ['ssh', 'user@server-1', 'ls', '-l'])) + [('あいう' if PY3 else 'あいう'.encode('utf-8'), ['ssh', 'user@server-1', 'ls', '-l'])]) self._check(self._parse(['--label', b'\xff\xfe', 'user@server-1', 'ls', '-l']), - Setting('\udcff\udcfe' if PY3 else b'\xff\xfe', ['ssh', 'user@server-1', 'ls', '-l'])) + [('\udcff\udcfe' if PY3 else b'\xff\xfe', ['ssh', 'user@server-1', 'ls', '-l'])]) self._check(self._parse(['server-1', 'echo', b'\xff\xfe']), - Setting('server-1', ['ssh', 'server-1', 'echo', '\udcff\udcfe' if PY3 else b'\xff\xfe'])) + [('server-1', ['ssh', 'server-1', 'echo', '\udcff\udcfe' if PY3 else b'\xff\xfe'])]) + + # hosts + hosts_path = os.path.join('tests', 'resources', 'test_color_ssh_hosts.txt') + self._check(self._parse(['-h', hosts_path, 'pwd']), [ + ('server-1', ['ssh', 'server-1', 'pwd']), + ('server-2', ['ssh', 'server-2', 'pwd']), + ('server-3', ['ssh', 'server-3', 'pwd']), + ('server-4', ['ssh', 'server-4', 'pwd']), + ('server-5', ['ssh', 'server-5', 'pwd']), + ('server-6', ['ssh', 'server-6', 'pwd']), + ('server-7', ['ssh', 'server-7', 'pwd']), + ('server-8', ['ssh', 'server-8', 'pwd']), + ('server-9', ['ssh', 'root@server-9', 'pwd']), + ('server-10', ['ssh', 'root@server-10', 'pwd']), + ]) + self._check(self._parse(['-H', 'server-11 root@server-12', 'pwd']), [ + ('server-11', ['ssh', 'server-11', 'pwd']), + ('server-12', ['ssh', 'root@server-12', 'pwd']), + ]) + self._check(self._parse(['--hosts', hosts_path, '--host', 'server-11 root@server-12', 'pwd']), 
[ + ('server-1', ['ssh', 'server-1', 'pwd']), + ('server-2', ['ssh', 'server-2', 'pwd']), + ('server-3', ['ssh', 'server-3', 'pwd']), + ('server-4', ['ssh', 'server-4', 'pwd']), + ('server-5', ['ssh', 'server-5', 'pwd']), + ('server-6', ['ssh', 'server-6', 'pwd']), + ('server-7', ['ssh', 'server-7', 'pwd']), + ('server-8', ['ssh', 'server-8', 'pwd']), + ('server-9', ['ssh', 'root@server-9', 'pwd']), + ('server-10', ['ssh', 'root@server-10', 'pwd']), + ('server-11', ['ssh', 'server-11', 'pwd']), + ('server-12', ['ssh', 'root@server-12', 'pwd']), + ]) + + # parallelism + self.assertEqual(self._parse(['-H', 'server-11 root@server-12', '-p3', 'pwd']).parallelism, 3) def test_parse_args_error(self): with self.withBytesOutput() as (out, err): self.assertSystemExit(2, Setting().parse_args, ['color-ssh'], out) self.assertSystemExit(2, Setting().parse_args, ['color-ssh', 'server-1'], out) self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--label', 'x'], out) + self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--host', ' ', 'pwd'], out) class TestMain(TestCase): - def test_main(self): + def test_main_single_proc(self): # requires: POSIX environment, color-cat command def f(bs): return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' @@ -66,28 +106,83 @@ class TestMain(TestCase): def g(bs): return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' - with tempfile.TemporaryFile() as out: - with tempfile.TemporaryFile() as err: - args = ['color-ssh', '--ssh', str('bash'), - os.path.join('tests', 'resources', 'test_color_ssh_01.sh'), 'abc', 'def'] - ret = color_ssh.main(args, stdout=out, stderr=err) - self.assertEqual(ret, 0) + with self.__with_temp_output() as (out, err): + args = ['color-ssh', '--ssh', str('bash'), + os.path.join('tests', 'resources', 'test_color_ssh_01.sh'), 'abc', 'def'] + ret = color_ssh.main(args, stdout=out, stderr=err) + 
self.assertEqual(ret, 0) + + out.seek(0) + err.seek(0) + + self.assertEqual(out.read(), f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) + self.assertEqual(err.read(), g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) + + def test_main_multi_proc(self): + # requires: POSIX environment, color-cat command + def f(bs): + return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' + + def g(bs): + return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' + + with self.__with_temp_output() as (out, err): + path = os.path.join('tests', 'resources', 'test_color_ssh_01.sh') + args = ['color-ssh', '--ssh', str('bash'), '-H', '%s %s' % (path, path), 'abc', 'def'] + + self.assertFalse(out.closed) + self.assertFalse(err.closed) + + ret = color_ssh.main(args, stdout=out, stderr=err) + self.assertEqual(ret, 0) + + out.seek(0) + err.seek(0) + + self.assertEqual(out.read(), (f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) * 2) + self.assertEqual(err.read(), (g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) * 2) + + def test_main_load_error(self): + with self.__with_temp_output() as (out, err): + args = ['color-ssh', '-h', 'not_exist_file', '--ssh', str('./tests/resources/not_exist_command'), 'x', 'y'] + ret = color_ssh.main(args, stdout=out, stderr=err) + self.assertEqual(ret, 1) + + out.seek(0) + err.seek(0) + + self.assertEqual(out.read(), b'') + self.assertTrue(b'No such file or directory' in err.read()) + + def test_main_task_error(self): + with self.__with_temp_output() as (out, err): + args = ['color-ssh', '--ssh', str('./tests/resources/not_exist_command'), 'x', 'y'] + ret = color_ssh.main(args, stdout=out, stderr=err) + self.assertEqual(ret, 1) - out.seek(0) - err.seek(0) + out.seek(0) + err.seek(0) - self.assertEqual(out.read(), f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) - 
self.assertEqual(err.read(), g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) + self.assertEqual(out.read(), b'') + self.assertTrue(b'No such file or directory' in err.read()) - def test_main_error(self): + @staticmethod + @contextmanager + def __with_temp_output(): with tempfile.TemporaryFile() as out: with tempfile.TemporaryFile() as err: - args = ['color-ssh', '--ssh', str('./tests/resources/not_exist_command'), 'x', 'y'] - ret = color_ssh.main(args, stdout=out, stderr=err) - self.assertEqual(ret, 1) + old_stdout = sys.stdout + old_stderr = sys.stderr - out.seek(0) - err.seek(0) + try: + try: + sys.stdout.buffer = out + sys.stderr.buffer = err + except AttributeError: + sys.stdout = out + sys.stderr = err - self.assertEqual(out.read(), b'') - self.assertTrue(b'No such file or directory' in err.read()) + yield out, err + finally: + sys.stdout = old_stdout + sys.stderr = old_stderr diff --git a/tests/resources/test_color_ssh_hosts.txt b/tests/resources/test_color_ssh_hosts.txt new file mode 100644 index 0000000..3b9d2dd --- /dev/null +++ b/tests/resources/test_color_ssh_hosts.txt @@ -0,0 +1,10 @@ +server-1 +server-2 +server-3 +server-4 +server-5 +server-6 +server-7 +server-8 +root@server-9 +root@server-10 \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pip", "pip_packages": [ "six", "mog-commons", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/mogproject/color-ssh.git@3c6ef87beb0faf48b0af7f4498b1be5ff34e6fe1#egg=color_ssh exceptiongroup==1.2.2 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 mog-commons==0.2.3 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 six==1.17.0 tomli==2.2.1
name: color-ssh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - mog-commons==0.2.3 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/color-ssh
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error" ]
[]
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error" ]
[]
null
344
mogproject__color-ssh-9
cdcbb8980f7a4e49797192dc089915d702322460
2015-12-20 18:59:01
cdcbb8980f7a4e49797192dc089915d702322460
diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py index 20f7651..f8d82d8 100644 --- a/src/color_ssh/color_ssh.py +++ b/src/color_ssh/color_ssh.py @@ -5,8 +5,8 @@ import io import shlex import subprocess from optparse import OptionParser -from color_ssh.util.util import * from multiprocessing.pool import Pool +from color_ssh.util.util import * __all__ = [] @@ -52,6 +52,10 @@ class Setting(object): '-p', '--par', dest='parallelism', default=self.DEFAULT_PARALLELISM, type='int', metavar='PAR', help='max number of parallel threads (default: %d)' % self.DEFAULT_PARALLELISM ) + parser.add_option( + '--distribute', dest='distribute', default=None, type='string', metavar='PREFIX', + help='split and distribute command-line arguments to each host' + ) option, args = parser.parse_args(argv[1:]) hosts = self._load_hosts(option.host_file) + (option.host_string.split() if option.host_string else []) @@ -64,11 +68,16 @@ class Setting(object): if not hosts: hosts = args[:1] - command = args[1:] + del args[0] + + # distribute args + if option.distribute: + dist_prefix = shlex.split(option.distribute) + d = distribute(len(hosts), args) + tasks = [(option.label or self._extract_label(host), + prefix + [host] + dist_prefix + d[i]) for i, host in enumerate(hosts) if d[i]] else: - command = args - - tasks = [(option.label or self._extract_label(host), prefix + [host] + command) for host in hosts] + tasks = [(option.label or self._extract_label(host), prefix + [host] + args) for host in hosts] self.parallelism = option.parallelism self.tasks = tasks diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py index 9273954..e1cdb8a 100644 --- a/src/color_ssh/util/util.py +++ b/src/color_ssh/util/util.py @@ -3,7 +3,7 @@ from __future__ import division, print_function, absolute_import, unicode_litera import sys import os -__all__ = ['PY3', 'arg2bytes', 'io2bytes'] +__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute'] PY3 = sys.version_info >= (3,) @@ -14,3 
+14,26 @@ def arg2bytes(arg): def io2bytes(fd): return fd.buffer if hasattr(fd, 'buffer') else fd + + +def distribute(num_workers, tasks): + """ + Split tasks and distribute to each worker. + + :param num_workers: int + :param tasks: list + :return: [[task]] (list of the list of tasks) + """ + assert 0 <= num_workers, 'num_workers must be non-negative integer.' + + ret = [] + if num_workers == 0: + return ret + + quotient, extra = divmod(len(tasks), num_workers) + j = 0 + for i in range(num_workers): + k = quotient + (1 if i < extra else 0) + ret.append(tasks[j:j + k]) + j += k + return ret
distribute option
mogproject/color-ssh
diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py index b5c4ba7..67684f8 100644 --- a/tests/color_ssh/test_color_ssh.py +++ b/tests/color_ssh/test_color_ssh.py @@ -88,6 +88,13 @@ class TestSetting(TestCase): # parallelism self.assertEqual(self._parse(['-H', 'server-11 root@server-12', '-p3', 'pwd']).parallelism, 3) + self.assertEqual(self._parse(['-H', 'server-11 root@server-12', '--par', '15', 'pwd']).parallelism, 15) + + # distribute + self._check(self._parse(['-H', 'server-11 root@server-12', '--distribute', 'echo "foo bar"', 'x', 'y', 'z']), [ + ('server-11', ['ssh', 'server-11', 'echo', 'foo bar', 'x', 'y']), + ('server-12', ['ssh', 'root@server-12', 'echo', 'foo bar', 'z']), + ]) def test_parse_args_error(self): with self.withBytesOutput() as (out, err): @@ -139,8 +146,10 @@ class TestMain(TestCase): out.seek(0) err.seek(0) - self.assertEqual(out.read(), (f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) * 2) - self.assertEqual(err.read(), (g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) * 2) + self.assertEqual(sorted(out.read()), + sorted((f(b'abc') + f(b'foo') + f('あいうえお'.encode('utf-8')) + f(b'\xff\xfe')) * 2)) + self.assertEqual(sorted(err.read()), + sorted((g(b'def') + g(b'bar') + g('かきくけこ'.encode('utf-8')) + g(b'\xfd\xfc')) * 2)) def test_main_load_error(self): with self.__with_temp_output() as (out, err): diff --git a/tests/util/__init__.py b/tests/util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/util/test_util.py b/tests/util/test_util.py new file mode 100644 index 0000000..291a4a7 --- /dev/null +++ b/tests/util/test_util.py @@ -0,0 +1,28 @@ +from __future__ import division, print_function, absolute_import, unicode_literals + +from mog_commons.unittest import TestCase +from color_ssh.util.util import distribute + + +class TestUtil(TestCase): + def test_distribute(self): + self.assertEqual(distribute(0, []), []) + self.assertEqual(distribute(0, 
['a']), []) + self.assertEqual(distribute(1, []), [[]]) + self.assertEqual(distribute(1, ['a']), [['a']]) + self.assertEqual(distribute(1, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']), + [['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']]) + self.assertEqual(distribute(2, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']), + [['a', 'b', 'c', 'd'], ['e', 'f', 'g', 'h']]) + self.assertEqual(distribute(3, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']), + [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']]) + self.assertEqual(distribute(5, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']), + [['a', 'b'], ['c', 'd'], ['e', 'f'], ['g'], ['h']]) + self.assertEqual(distribute(5, ['a', 'b', 'c', 'd']), + [['a'], ['b'], ['c'], ['d'], []]) + + xs = distribute(12345, range(200000)) + self.assertEqual(sum(map(sum, xs)), 200000 * (200000 - 1) / 2) + + def test_distribute_error(self): + self.assertRaises(AssertionError, distribute, -1, [])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 2 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "six", "mog-commons", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 -e git+https://github.com/mogproject/color-ssh.git@cdcbb8980f7a4e49797192dc089915d702322460#egg=color_ssh importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.0.3 MarkupSafe==2.0.1 mog-commons==0.2.3 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: color-ssh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - jinja2==3.0.3 - markupsafe==2.0.1 - mog-commons==0.2.3 - six==1.17.0 prefix: /opt/conda/envs/color-ssh
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args", "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error", "tests/util/test_util.py::TestUtil::test_distribute", "tests/util/test_util.py::TestUtil::test_distribute_error" ]
[]
[]
[]
null
345
falconry__falcon-676
37f175f120aaea587c521715ed4815122446a953
2015-12-21 19:02:02
b78ffaac7c412d3b3d6cd3c70dd05024d79d2cce
diff --git a/falcon/request.py b/falcon/request.py index 54be8c0..bd83227 100644 --- a/falcon/request.py +++ b/falcon/request.py @@ -353,7 +353,8 @@ class Request(object): @property def client_accepts_msgpack(self): - return self.client_accepts('application/x-msgpack') + return (self.client_accepts('application/x-msgpack') + or self.client_accepts('application/msgpack')) @property def client_accepts_xml(self):
Request.client_accepts_msgpack only supports 'application/x-msgpack' The use of the 'x-' prefix is now discouraged for media types. We should update this Request property to also return True for 'application/msgpack', and verify the change with additional tests.
falconry/falcon
diff --git a/tests/test_req_vars.py b/tests/test_req_vars.py index 9e88754..c71f02e 100644 --- a/tests/test_req_vars.py +++ b/tests/test_req_vars.py @@ -348,6 +348,12 @@ class TestReqVars(testing.TestBase): self.assertFalse(req.client_accepts_json) self.assertTrue(req.client_accepts_msgpack) + headers = {'Accept': 'application/msgpack'} + req = Request(testing.create_environ(headers=headers)) + self.assertFalse(req.client_accepts_xml) + self.assertFalse(req.client_accepts_json) + self.assertTrue(req.client_accepts_msgpack) + headers = { 'Accept': 'application/json,application/xml,application/x-msgpack' }
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "coverage", "ddt", "pyyaml", "requests", "testtools", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "tools/test-requires" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 ddt==1.7.2 exceptiongroup==1.2.2 -e git+https://github.com/falconry/falcon.git@37f175f120aaea587c521715ed4815122446a953#egg=falcon idna==3.10 iniconfig==2.1.0 nose==1.3.7 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 python-mimeparse==2.0.0 PyYAML==6.0.2 requests==2.32.3 six==1.17.0 testtools==2.7.2 tomli==2.2.1 urllib3==2.3.0
name: falcon channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - ddt==1.7.2 - exceptiongroup==1.2.2 - idna==3.10 - iniconfig==2.1.0 - nose==1.3.7 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - python-mimeparse==2.0.0 - pyyaml==6.0.2 - requests==2.32.3 - six==1.17.0 - testtools==2.7.2 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/falcon
[ "tests/test_req_vars.py::TestReqVars::test_client_accepts_props" ]
[ "tests/test_req_vars.py::TestReqVars::test_client_accepts" ]
[ "tests/test_req_vars.py::TestReqVars::test_attribute_headers", "tests/test_req_vars.py::TestReqVars::test_bogus_content_length_nan", "tests/test_req_vars.py::TestReqVars::test_bogus_content_length_neg", "tests/test_req_vars.py::TestReqVars::test_client_accepts_bogus", "tests/test_req_vars.py::TestReqVars::test_client_prefers", "tests/test_req_vars.py::TestReqVars::test_content_length", "tests/test_req_vars.py::TestReqVars::test_content_length_method", "tests/test_req_vars.py::TestReqVars::test_content_type_method", "tests/test_req_vars.py::TestReqVars::test_date_1___Date____date__", "tests/test_req_vars.py::TestReqVars::test_date_2___If_Modified_since____if_modified_since__", "tests/test_req_vars.py::TestReqVars::test_date_3___If_Unmodified_since____if_unmodified_since__", "tests/test_req_vars.py::TestReqVars::test_date_invalid_1___Date____date__", "tests/test_req_vars.py::TestReqVars::test_date_invalid_2___If_Modified_Since____if_modified_since__", "tests/test_req_vars.py::TestReqVars::test_date_invalid_3___If_Unmodified_Since____if_unmodified_since__", "tests/test_req_vars.py::TestReqVars::test_date_missing_1_date", "tests/test_req_vars.py::TestReqVars::test_date_missing_2_if_modified_since", "tests/test_req_vars.py::TestReqVars::test_date_missing_3_if_unmodified_since", "tests/test_req_vars.py::TestReqVars::test_empty", "tests/test_req_vars.py::TestReqVars::test_empty_path", "tests/test_req_vars.py::TestReqVars::test_host", "tests/test_req_vars.py::TestReqVars::test_method", "tests/test_req_vars.py::TestReqVars::test_missing_attribute_header", "tests/test_req_vars.py::TestReqVars::test_missing_qs", "tests/test_req_vars.py::TestReqVars::test_nonlatin_path", "tests/test_req_vars.py::TestReqVars::test_range", "tests/test_req_vars.py::TestReqVars::test_range_invalid", "tests/test_req_vars.py::TestReqVars::test_range_unit", "tests/test_req_vars.py::TestReqVars::test_reconstruct_url", "tests/test_req_vars.py::TestReqVars::test_relative_uri", 
"tests/test_req_vars.py::TestReqVars::test_subdomain", "tests/test_req_vars.py::TestReqVars::test_uri", "tests/test_req_vars.py::TestReqVars::test_uri_http_1_0", "tests/test_req_vars.py::TestReqVars::test_uri_https" ]
[]
Apache License 2.0
346
jupyter-incubator__sparkmagic-89
5d7c9a29da1f4a3a12fc9cd821807b474625afc1
2015-12-22 00:54:05
5d7c9a29da1f4a3a12fc9cd821807b474625afc1
diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py index 3abecfd..bd4c70f 100644 --- a/remotespark/sparkkernelbase.py +++ b/remotespark/sparkkernelbase.py @@ -9,6 +9,11 @@ from remotespark.utils.utils import get_connection_string class SparkKernelBase(IPythonKernel): + run_command = "run" + config_command = "config" + sql_command = "sql" + hive_command = "hive" + def __init__(self, implementation, implementation_version, language, language_version, language_info, kernel_conf_name, session_language, client_name, **kwargs): # Required by Jupyter - Override @@ -32,7 +37,7 @@ class SparkKernelBase(IPythonKernel): # Disable warnings for test env in HDI requests.packages.urllib3.disable_warnings() - if "testing" not in kwargs.keys(): + if not kwargs.get("testing", False): (username, password, url) = self._get_configuration() self.connection_string = get_connection_string(url, username, password) self._load_magics_extension() @@ -41,29 +46,40 @@ class SparkKernelBase(IPythonKernel): if self._fatal_error is not None: self._abort_with_fatal_error(self._fatal_error) - if not self.session_started: - self._start_session() - - # Modify code by prepending spark magic text - if code.lower().startswith("%sql\n") or code.lower().startswith("%sql "): - code = "%%spark -c sql\n{}".format(code[5:]) - elif code.lower().startswith("%%sql\n") or code.lower().startswith("%%sql "): - code = "%%spark -c sql\n{}".format(code[6:]) - elif code.lower().startswith("%hive\n") or code.lower().startswith("%hive "): - code = "%%spark -c hive\n{}".format(code[6:]) - elif code.lower().startswith("%%hive\n") or code.lower().startswith("%%hive "): - code = "%%spark -c hive\n{}".format(code[7:]) + subcommand, flags, code_to_run = self._parse_user_command(code) + + if subcommand == self.run_command: + code_to_run = "%%spark\n{}".format(code_to_run) + return self._run_starting_session(code_to_run, silent, store_history, user_expressions, allow_stdin) + elif subcommand == 
self.sql_command: + code_to_run = "%%spark -c sql\n{}".format(code_to_run) + return self._run_starting_session(code_to_run, silent, store_history, user_expressions, allow_stdin) + elif subcommand == self.hive_command: + code_to_run = "%%spark -c hive\n{}".format(code_to_run) + return self._run_starting_session(code_to_run, silent, store_history, user_expressions, allow_stdin) + elif subcommand == self.config_command: + restart_session = False + + if self.session_started: + if "f" not in flags: + raise KeyError("A session has already been started. In order to modify the Spark configuration, " + "please provide the '-f' flag at the beginning of the config magic:\n\te.g. `%config" + " -f {}`\n\nNote that this will kill the current session and will create a new one " + "with the configuration provided. All previously run commands in the session will be" + " lost.") + else: + restart_session = True + + code_to_run = "%%spark config {}".format(code_to_run) + + return self._run_restarting_session(code_to_run, silent, store_history, user_expressions, allow_stdin, + restart_session) else: - code = "%%spark\n{}".format(code) - - return self._execute_cell(code, silent, store_history, user_expressions, allow_stdin) + raise KeyError("Magic '{}' not supported.".format(subcommand)) def do_shutdown(self, restart): # Cleanup - if self.session_started: - code = "%spark cleanup" - self._execute_cell_for_user(code, True, False) - self.session_started = False + self._delete_session() return self._do_shutdown_ipykernel(restart) @@ -83,6 +99,27 @@ class SparkKernelBase(IPythonKernel): log_if_error="Failed to create a Livy session.") self.logger.debug("Added session.") + def _delete_session(self): + if self.session_started: + code = "%spark cleanup" + self._execute_cell_for_user(code, True, False) + self.session_started = False + + def _run_starting_session(self, code, silent, store_history, user_expressions, allow_stdin): + self._start_session() + return self._execute_cell(code, silent, 
store_history, user_expressions, allow_stdin) + + def _run_restarting_session(self, code, silent, store_history, user_expressions, allow_stdin, restart): + if restart: + self._delete_session() + + res = self._execute_cell(code, silent, store_history, user_expressions, allow_stdin) + + if restart: + self._start_session() + + return res + def _get_configuration(self): try: credentials = getattr(conf, 'kernel_' + self.kernel_conf_name + '_credentials')() @@ -95,6 +132,35 @@ class SparkKernelBase(IPythonKernel): self.kernel_conf_name) self._abort_with_fatal_error(message) + def _parse_user_command(self, code): + # Normalize 2 signs to 1 + if code.startswith("%%"): + code = code[1:] + + # When no magic, return run command + if not code.startswith("%"): + code = "%{} {}".format(self.run_command, code) + + # Remove percentage sign + code = code[1:] + + split_code = code.split(None, 1) + subcommand = split_code[0].lower() + flags = [] + rest = split_code[1] + + # Get all flags + flag_split = rest.split(None, 1) + while len(flag_split) >= 2 and flag_split[0].startswith("-"): + flags.append(flag_split[0][1:].lower()) + rest = flag_split[1] + flag_split = rest.split(None, 1) + + # flags to lower + flags = [i.lower() for i in flags] + + return subcommand, flags, rest + def _execute_cell(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False, shutdown_if_error=False, log_if_error=None): reply_content = self._execute_cell_for_user(code, silent, store_history, user_expressions, allow_stdin)
Expose session configs through wrapper kernel
jupyter-incubator/sparkmagic
diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py index a3c3ce1..d5f105c 100644 --- a/tests/test_sparkkernelbase.py +++ b/tests/test_sparkkernelbase.py @@ -1,5 +1,5 @@ from mock import MagicMock, call -from nose.tools import with_setup +from nose.tools import with_setup, raises from remotespark.sparkkernelbase import SparkKernelBase import remotespark.utils.configuration as conf @@ -53,7 +53,7 @@ def test_get_config(): pwd = "p" url = "url" - config = { "kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url} } + config = {"kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url}} conf.override(config) u, p, r = kernel._get_configuration() @@ -93,7 +93,55 @@ def test_start_session(): assert kernel.session_started assert call("%spark add TestKernel python {} skip".format(conn_str), True, False, None, False) \ - in execute_cell_mock.mock_calls + in execute_cell_mock.mock_calls + + +@with_setup(_setup(), _teardown()) +def test_delete_session(): + kernel.session_started = True + + kernel._delete_session() + + assert not kernel.session_started + assert call("%spark cleanup", True, False) in execute_cell_mock.mock_calls + + +@with_setup(_setup, _teardown) +def test_set_config(): + def _check(prepend, session_started=False, key_error_expected=False): + # Set up + properties = """{"extra": 2}""" + code = prepend + properties + kernel.session_started = session_started + execute_cell_mock.reset_mock() + + # Call method + try: + kernel.do_execute(code, False) + except KeyError: + if not key_error_expected: + assert False + + # When exception is expected, nothing to check + return + + assert session_started == kernel.session_started + assert call("%%spark config {}".format(properties), False, True, None, False) \ + in execute_cell_mock.mock_calls + + if session_started and not key_error_expected: + # This means -f must be present, so check that a restart happened + assert call("%spark cleanup", True, False) in 
execute_cell_mock.mock_calls + assert call("%spark add TestKernel python {} skip".format(conn_str), True, False, None, False) \ + in execute_cell_mock.mock_calls + + _check("%config ") + _check("%config\n") + _check("%%config ") + _check("%%config\n") + _check("%config -f ") + _check("%config ", True, True) + _check("%config -f ", True, False) @with_setup(_setup, _teardown) @@ -111,6 +159,17 @@ def test_do_execute_initializes_magics_if_not_run(): assert call("%%spark\n{}".format(code), False, True, None, False) in execute_cell_mock.mock_calls +@with_setup(_setup, _teardown) +@raises(KeyError) +def test_magic_not_supported(): + # Set up + code = "%alex some spark code" + kernel.session_started = True + + # Call method + kernel.do_execute(code, False) + + @with_setup(_setup, _teardown) def test_call_spark(): # Set up
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.7.1 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 attrs==24.2.0 beautifulsoup4==4.13.3 bleach==6.0.0 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 charset-normalizer==3.4.1 comm==0.1.4 decorator==5.1.1 defusedxml==0.7.1 entrypoints==0.4 exceptiongroup==1.2.2 fastjsonschema==2.21.1 idna==3.10 importlib-metadata==6.7.0 importlib-resources==5.12.0 iniconfig==2.0.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 Jinja2==3.1.6 jsonschema==4.17.3 jupyter-server==1.24.0 jupyter_client==7.4.9 jupyter_core==4.12.0 jupyterlab-pygments==0.2.2 jupyterlab_widgets==1.1.11 MarkupSafe==2.1.5 mistune==3.0.2 mock==5.2.0 nbclassic==1.2.0 nbclient==0.7.4 nbconvert==7.6.0 nbformat==5.8.0 nest-asyncio==1.6.0 nose==1.3.7 notebook==6.5.7 notebook_shim==0.2.4 numpy==1.21.6 packaging==24.0 pandas==1.3.5 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 pkgutil_resolve_name==1.3.10 plotly==5.18.0 pluggy==1.2.0 prometheus-client==0.17.1 ptyprocess==0.7.0 pycparser==2.21 Pygments==2.17.2 pyrsistent==0.19.3 pytest==7.4.4 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==26.2.1 -e git+https://github.com/jupyter-incubator/sparkmagic.git@5d7c9a29da1f4a3a12fc9cd821807b474625afc1#egg=remotespark requests==2.31.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.4.1 tenacity==8.2.3 terminado==0.17.1 tinycss2==1.2.1 tomli==2.0.1 tornado==6.2 traitlets==5.9.0 typing_extensions==4.7.1 urllib3==2.0.7 webencodings==0.5.1 websocket-client==1.6.1 widgetsnbextension==3.6.10 zipp==3.15.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.7.1 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - attrs==24.2.0 - beautifulsoup4==4.13.3 - bleach==6.0.0 - cffi==1.15.1 - charset-normalizer==3.4.1 - comm==0.1.4 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - idna==3.10 - importlib-metadata==6.7.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jinja2==3.1.6 - jsonschema==4.17.3 - jupyter-client==7.4.9 - jupyter-core==4.12.0 - jupyter-server==1.24.0 - jupyterlab-pygments==0.2.2 - jupyterlab-widgets==1.1.11 - markupsafe==2.1.5 - mistune==3.0.2 - mock==5.2.0 - nbclassic==1.2.0 - nbclient==0.7.4 - nbconvert==7.6.0 - nbformat==5.8.0 - nest-asyncio==1.6.0 - nose==1.3.7 - notebook==6.5.7 - notebook-shim==0.2.4 - numpy==1.21.6 - packaging==24.0 - pandas==1.3.5 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pkgutil-resolve-name==1.3.10 - plotly==5.18.0 - pluggy==1.2.0 - prometheus-client==0.17.1 - ptyprocess==0.7.0 - pycparser==2.21 - pygments==2.17.2 - pyrsistent==0.19.3 - pytest==7.4.4 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==26.2.1 - requests==2.31.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - 
sniffio==1.3.1 - soupsieve==2.4.1 - tenacity==8.2.3 - terminado==0.17.1 - tinycss2==1.2.1 - tomli==2.0.1 - tornado==6.2 - traitlets==5.9.0 - typing-extensions==4.7.1 - urllib3==2.0.7 - webencodings==0.5.1 - websocket-client==1.6.1 - widgetsnbextension==3.6.10 - zipp==3.15.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_sparkkernelbase.py::test_delete_session", "tests/test_sparkkernelbase.py::test_set_config", "tests/test_sparkkernelbase.py::test_magic_not_supported" ]
[]
[ "tests/test_sparkkernelbase.py::test_get_config", "tests/test_sparkkernelbase.py::test_get_config_not_set", "tests/test_sparkkernelbase.py::test_initialize_magics", "tests/test_sparkkernelbase.py::test_start_session", "tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run", "tests/test_sparkkernelbase.py::test_call_spark", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution", "tests/test_sparkkernelbase.py::test_call_spark_sql_new_line", "tests/test_sparkkernelbase.py::test_call_spark_hive_new_line", "tests/test_sparkkernelbase.py::test_shutdown_cleans_up" ]
[]
Modified BSD License
347
box__box-python-sdk-99
f365c177f70ce6cfc2d53528ed649cdac20bb43d
2015-12-22 19:59:14
f365c177f70ce6cfc2d53528ed649cdac20bb43d
diff --git a/HISTORY.rst b/HISTORY.rst index b567bfa..9af5bb4 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -6,7 +6,17 @@ Release History Upcoming ++++++++ -1.3.2 +1.3.3 (2015-12-22) +++++++++++++++++++ + +- Added a new class, ``DeveloperTokenClient`` that makes it easy to get started using the SDK with a Box developer + token. It uses another new class, ``DeveloperTokenAuth`` for auth. + +**Bugfixes** + +- Added limit, offset, and filter_term parameters to ``client.users()`` to match up with the Box API. + +1.3.2 (2015-11-16) ++++++++++++++++++ - Fix ``boxsdk.util.log.setup_logging()`` on Python 3. diff --git a/boxsdk/client.py b/boxsdk/client.py index 8a04d76..f84d9a3 100644 --- a/boxsdk/client.py +++ b/boxsdk/client.py @@ -100,21 +100,48 @@ def group(self, group_id): """ return Group(session=self._session, object_id=group_id) - def users(self): + def users(self, limit=None, offset=0, filter_term=None): """ Get a list of all users for the Enterprise along with their user_id, public_name, and login. + :param limit: + The maximum number of users to return. If not specified, the Box API will determine an appropriate limit. + :type limit: + `int` or None + :param offset: + The user index at which to start the response. + :type offset: + `int` + :param filter_term: + Filters the results to only users starting with the filter_term in either the name or the login. + :type filter_term: + `unicode` or None :return: The list of all users in the enterprise. 
:rtype: `list` of :class:`User` """ url = '{0}/users'.format(API.BASE_API_URL) - box_response = self._session.get(url) + params = dict(offset=offset) + if limit is not None: + params['limit'] = limit + if filter_term is not None: + params['filter_term'] = filter_term + box_response = self._session.get(url, params=params) response = box_response.json() return [User(self._session, item['id'], item) for item in response['entries']] - def search(self, query, limit, offset, ancestor_folders=None, file_extensions=None, metadata_filters=None, result_type=None, content_types=None): + def search( + self, + query, + limit, + offset, + ancestor_folders=None, + file_extensions=None, + metadata_filters=None, + result_type=None, + content_types=None + ): """ Search Box for items matching the given query. @@ -155,14 +182,16 @@ def search(self, query, limit, offset, ancestor_folders=None, file_extensions=No :rtype: `list` of :class:`Item` """ - return Search(self._session).search(query=query, - limit=limit, - offset=offset, - ancestor_folders=ancestor_folders, - file_extensions=file_extensions, - metadata_filters=metadata_filters, - result_type=result_type, - content_types=content_types) + return Search(self._session).search( + query=query, + limit=limit, + offset=offset, + ancestor_folders=ancestor_folders, + file_extensions=file_extensions, + metadata_filters=metadata_filters, + result_type=result_type, + content_types=content_types, + ) def events(self): """ @@ -333,5 +362,8 @@ def with_shared_link(self, shared_link, shared_link_password): class DeveloperTokenClient(Client): + """ + Box client subclass which authorizes with a developer token. 
+ """ def __init__(self, oauth=None, network_layer=None, session=None): super(DeveloperTokenClient, self).__init__(oauth or DeveloperTokenAuth(), network_layer, session) diff --git a/setup.py b/setup.py index 2d16b34..59885f9 100644 --- a/setup.py +++ b/setup.py @@ -61,7 +61,7 @@ def main(): install_requires.append('ordereddict>=1.1') setup( name='boxsdk', - version='1.3.2', + version='1.3.3', description='Official Box Python SDK', long_description=open(join(base_dir, 'README.rst')).read(), author='Box', @@ -69,7 +69,7 @@ def main(): url='http://opensource.box.com', packages=find_packages(exclude=['demo', 'docs', 'test']), install_requires=install_requires, - extras_require={'jwt': jwt_requires, 'redis': redis_requires}, + extras_require={'jwt': jwt_requires, 'redis': redis_requires, 'all': jwt_requires + redis_requires}, tests_require=['pytest', 'pytest-xdist', 'mock', 'sqlalchemy', 'bottle', 'jsonpatch'], cmdclass={'test': PyTest}, classifiers=CLASSIFIERS,
The get all users in enterprise API needs to be pageable Right now it just return a list of :class:`User`
box/box-python-sdk
diff --git a/test/unit/test_client.py b/test/unit/test_client.py index ef6ba70..81f2341 100644 --- a/test/unit/test_client.py +++ b/test/unit/test_client.py @@ -159,10 +159,40 @@ def test_factory_returns_the_correct_object(mock_client, test_class, factory_met assert obj.object_id == fake_id -def test_users_return_the_correct_user_objects(mock_client, mock_box_session, users_response, user_id_1, user_id_2): [email protected](scope='module', params=(None, 'user1')) +def users_filter_term(request): + return request.param + + [email protected](scope='module', params=(0, 10)) +def users_offset(request): + return request.param + + [email protected](scope='module', params=(0, 10)) +def users_limit(request): + return request.param + + +def test_users_return_the_correct_user_objects( + mock_client, + mock_box_session, + users_response, + user_id_1, + user_id_2, + users_filter_term, + users_offset, + users_limit, +): # pylint:disable=redefined-outer-name mock_box_session.get.return_value = users_response - users = mock_client.users() + users = mock_client.users(users_limit, users_offset, users_filter_term) + expected_params = {'offset': users_offset} + if users_limit is not None: + expected_params['limit'] = users_limit + if users_filter_term is not None: + expected_params['filter_term'] = users_filter_term + mock_box_session.get.assert_called_once_with('{0}/users'.format(API.BASE_API_URL), params=expected_params) assert users[0].object_id == user_id_1 assert users[1].object_id == user_id_2
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 2 }, "num_modified_files": 3 }
1.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": [ "requirements.txt", "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.13 astroid==2.11.7 async-timeout==4.0.2 attrs==22.2.0 Babel==2.11.0 bottle==0.13.2 -e git+https://github.com/box/box-python-sdk.git@f365c177f70ce6cfc2d53528ed649cdac20bb43d#egg=boxsdk certifi==2021.5.30 cffi==1.15.1 charset-normalizer==2.0.12 coverage==6.2 cryptography==40.0.2 dill==0.3.4 distlib==0.3.9 docutils==0.18.1 execnet==1.9.0 filelock==3.4.1 greenlet==2.0.2 idna==3.10 imagesize==1.4.1 importlib-metadata==4.8.3 importlib-resources==5.4.0 iniconfig==1.1.1 isort==5.10.1 Jinja2==3.0.3 jsonpatch==1.32 jsonpointer==2.3 lazy-object-proxy==1.7.1 MarkupSafe==2.0.1 mccabe==0.7.0 mock==1.0.1 packaging==21.3 pep8==1.7.1 platformdirs==2.4.0 pluggy==1.0.0 py==1.11.0 pycparser==2.21 Pygments==2.14.0 PyJWT==2.4.0 pylint==2.13.9 pyparsing==3.1.4 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-cov==2.5.1 pytest-mock==3.6.1 pytest-xdist==1.17.1 pytz==2025.2 redis==4.3.6 requests==2.27.1 requests-toolbelt==1.0.0 six==1.17.0 snowballstemmer==2.2.0 Sphinx==5.3.0 sphinxcontrib-applehelp==1.0.2 sphinxcontrib-devhelp==1.0.2 sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 sphinxcontrib-serializinghtml==1.1.5 SQLAlchemy==1.4.54 swebench-matterhorn @ file:///swebench_matterhorn toml==0.10.2 tomli==1.2.3 tox==3.28.0 typed-ast==1.5.5 typing_extensions==4.1.1 urllib3==1.26.20 virtualenv==20.17.1 wrapt==1.16.0 zipp==3.6.0
name: box-python-sdk channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.13 - astroid==2.11.7 - async-timeout==4.0.2 - attrs==22.2.0 - babel==2.11.0 - bottle==0.13.2 - cffi==1.15.1 - charset-normalizer==2.0.12 - coverage==6.2 - cryptography==40.0.2 - dill==0.3.4 - distlib==0.3.9 - docutils==0.18.1 - execnet==1.9.0 - filelock==3.4.1 - greenlet==2.0.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==4.8.3 - importlib-resources==5.4.0 - iniconfig==1.1.1 - isort==5.10.1 - jinja2==3.0.3 - jsonpatch==1.32 - jsonpointer==2.3 - lazy-object-proxy==1.7.1 - markupsafe==2.0.1 - mccabe==0.7.0 - mock==1.0.1 - packaging==21.3 - pep8==1.7.1 - platformdirs==2.4.0 - pluggy==1.0.0 - py==1.11.0 - pycparser==2.21 - pygments==2.14.0 - pyjwt==2.4.0 - pylint==2.13.9 - pyparsing==3.1.4 - pytest==7.0.1 - pytest-asyncio==0.16.0 - pytest-cov==2.5.1 - pytest-mock==3.6.1 - pytest-xdist==1.17.1 - pytz==2025.2 - redis==4.3.6 - requests==2.27.1 - requests-toolbelt==1.0.0 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==5.3.0 - sphinxcontrib-applehelp==1.0.2 - sphinxcontrib-devhelp==1.0.2 - sphinxcontrib-htmlhelp==2.0.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==1.0.3 - sphinxcontrib-serializinghtml==1.1.5 - sqlalchemy==1.4.54 - swebench-matterhorn==0.0.0 - toml==0.10.2 - tomli==1.2.3 - tox==3.28.0 - typed-ast==1.5.5 - 
typing-extensions==4.1.1 - urllib3==1.26.20 - virtualenv==20.17.1 - wrapt==1.16.0 - zipp==3.6.0 prefix: /opt/conda/envs/box-python-sdk
[ "test/unit/test_client.py::test_users_return_the_correct_user_objects[None-0-0]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[None-10-0]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[None-10-10]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-10-10]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-0-10]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-10-0]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[user1-0-0]", "test/unit/test_client.py::test_users_return_the_correct_user_objects[None-0-10]" ]
[]
[ "test/unit/test_client.py::test_factory_returns_the_correct_object[Folder-folder]", "test/unit/test_client.py::test_factory_returns_the_correct_object[File-file]", "test/unit/test_client.py::test_factory_returns_the_correct_object[User-user]", "test/unit/test_client.py::test_factory_returns_the_correct_object[Group-group]", "test/unit/test_client.py::test_factory_returns_the_correct_object[GroupMembership-group_membership]", "test/unit/test_client.py::test_search_instantiates_search_and_calls_search", "test/unit/test_client.py::test_events_returns_event_object", "test/unit/test_client.py::test_groups_return_the_correct_group_objects", "test/unit/test_client.py::test_create_group_returns_the_correct_group_object", "test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[file-None]", "test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[file-p4ssw0rd]", "test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[folder-None]", "test/unit/test_client.py::test_get_shared_item_returns_the_correct_item[folder-p4ssw0rd]", "test/unit/test_client.py::test_make_request_passes_request_on_to_session[get]", "test/unit/test_client.py::test_make_request_passes_request_on_to_session[post]", "test/unit/test_client.py::test_make_request_passes_request_on_to_session[put]", "test/unit/test_client.py::test_make_request_passes_request_on_to_session[delete]", "test/unit/test_client.py::test_make_request_passes_request_on_to_session[options]", "test/unit/test_client.py::test_create_app_user_returns_the_correct_user_object", "test/unit/test_client.py::test_create_enterprise_user_returns_the_correct_user_object" ]
[]
Apache License 2.0
348
jupyter-incubator__sparkmagic-93
7025cb5b607abde80d5dd8a701c40c3598801e9d
2015-12-23 01:03:52
7025cb5b607abde80d5dd8a701c40c3598801e9d
diff --git a/remotespark/livyclientlib/livyclientfactory.py b/remotespark/livyclientlib/livyclientfactory.py index f7b2ae3..3bdda7f 100644 --- a/remotespark/livyclientlib/livyclientfactory.py +++ b/remotespark/livyclientlib/livyclientfactory.py @@ -30,11 +30,15 @@ class LivyClientFactory(object): @staticmethod def create_session(connection_string, properties, session_id="-1", sql_created=False): - cso = get_connection_string_elements(connection_string) - - retry_policy = LinearRetryPolicy(seconds_to_sleep=5, max_retries=5) - http_client = LivyReliableHttpClient(cso.url, cso.username, cso.password, retry_policy) + http_client = LivyClientFactory.create_http_client(connection_string) session = LivySession(http_client, session_id, sql_created, properties) return session + + @staticmethod + def create_http_client(connection_string): + cso = get_connection_string_elements(connection_string) + + retry_policy = LinearRetryPolicy(seconds_to_sleep=5, max_retries=5) + return LivyReliableHttpClient(cso.url, cso.username, cso.password, retry_policy) diff --git a/remotespark/livyclientlib/reliablehttpclient.py b/remotespark/livyclientlib/reliablehttpclient.py index bcc14d5..c146dae 100644 --- a/remotespark/livyclientlib/reliablehttpclient.py +++ b/remotespark/livyclientlib/reliablehttpclient.py @@ -7,7 +7,6 @@ from time import sleep import requests import remotespark.utils.configuration as conf -from remotespark.utils.constants import Constants from remotespark.utils.log import Log from remotespark.utils.utils import get_connection_string diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py index da60fb7..1254f48 100644 --- a/remotespark/livyclientlib/sparkcontroller.py +++ b/remotespark/livyclientlib/sparkcontroller.py @@ -20,24 +20,49 @@ class SparkController(object): else: self.client_manager = ClientManager() - def run_cell(self, cell, client_name = None): + def run_cell(self, cell, client_name=None): client_to_use = 
self.get_client_by_name_or_default(client_name) return client_to_use.execute(cell) - def run_cell_sql(self, cell, client_name = None): + def run_cell_sql(self, cell, client_name=None): client_to_use = self.get_client_by_name_or_default(client_name) return client_to_use.execute_sql(cell) - def run_cell_hive(self, cell, client_name = None): + def run_cell_hive(self, cell, client_name=None): client_to_use = self.get_client_by_name_or_default(client_name) return client_to_use.execute_hive(cell) + def get_all_sessions_endpoint(self, connection_string): + http_client = self.client_factory.create_http_client(connection_string) + r = http_client.get("/sessions", [200]) + sessions = r.json()["sessions"] + session_list = [self.client_factory.create_session(connection_string, {"kind": s["kind"]}, s["id"]) + for s in sessions] + for s in session_list: + s.refresh_status() + return session_list + + def get_all_sessions_endpoint_info(self, connection_string): + sessions = self.get_all_sessions_endpoint(connection_string) + return [str(s) for s in sessions] + def cleanup(self): self.client_manager.clean_up_all() + def cleanup_endpoint(self, connection_string): + for session in self.get_all_sessions_endpoint(connection_string): + session.delete() + def delete_session_by_name(self, name): self.client_manager.delete_client(name) + def delete_session_by_id(self, connection_string, session_id): + http_client = self.client_factory.create_http_client(connection_string) + r = http_client.get("/sessions/{}".format(session_id), [200, 404]) + if r.status_code != 404: + session = self.client_factory.create_session(connection_string, {"kind": r.json()["kind"]}, session_id, False) + session.delete() + def add_session(self, name, connection_string, skip_if_exists, properties): if skip_if_exists and (name in self.client_manager.get_sessions_list()): self.logger.debug("Skipping {} because it already exists in list of sessions.".format(name)) @@ -60,4 +85,3 @@ class SparkController(object): else: 
client_name = client_name.lower() return self.client_manager.get_client(client_name) - diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py index d1f40b3..705954f 100644 --- a/remotespark/remotesparkmagics.py +++ b/remotespark/remotesparkmagics.py @@ -43,6 +43,8 @@ class RemoteSparkMagics(Magics): except KeyError: self.logger.error("Could not read env vars for serialization.") + self.properties = conf.session_configs() + self.logger.debug("Initialized spark magics.") @magic_arguments() @@ -107,12 +109,20 @@ class RemoteSparkMagics(Magics): # info if subcommand == "info": - self._print_info() + if len(args.command) == 2: + connection_string = args.command[1] + info_sessions = self.spark_controller.get_all_sessions_endpoint_info(connection_string) + self._print_endpoint_info(info_sessions) + elif len(args.command) == 1: + self._print_local_info() + else: + raise ValueError("Subcommand 'info' requires no value or a connection string to show all sessions. " + "{}".format(usage)) # config elif subcommand == "config": # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes... rest_of_line = user_input[7:] - conf.override(conf.session_configs.__name__, json.loads(rest_of_line)) + self.properties = json.loads(rest_of_line) # add elif subcommand == "add": if len(args.command) != 4 and len(args.command) != 5: @@ -127,19 +137,32 @@ class RemoteSparkMagics(Magics): else: skip = False - properties = copy.deepcopy(conf.session_configs()) + properties = copy.deepcopy(self.properties) properties["kind"] = self._get_livy_kind(language) self.spark_controller.add_session(name, connection_string, skip, properties) # delete elif subcommand == "delete": - if len(args.command) != 2: - raise ValueError("Subcommand 'delete' requires an argument. 
{}".format(usage)) - name = args.command[1].lower() - self.spark_controller.delete_session_by_name(name) + if len(args.command) == 2: + name = args.command[1].lower() + self.spark_controller.delete_session_by_name(name) + elif len(args.command) == 3: + connection_string = args.command[1] + session_id = args.command[2] + self.spark_controller.delete_session_by_id(connection_string, session_id) + else: + raise ValueError("Subcommand 'delete' requires a session name, or a connection string and id. {}" + .format(usage)) # cleanup elif subcommand == "cleanup": - self.spark_controller.cleanup() + if len(args.command) == 2: + connection_string = args.command[1] + self.spark_controller.cleanup_endpoint(connection_string) + elif len(args.command) == 1: + self.spark_controller.cleanup() + else: + raise ValueError("Subcommand 'cleanup' requires no value or a connection string to clean up sessions. " + "{}".format(usage)) # run elif len(subcommand) == 0: if args.context == Constants.context_name_spark: @@ -170,15 +193,21 @@ class RemoteSparkMagics(Magics): self.shell.write_err(e.out) return None - def _print_info(self): - sessions_info = ["\t\t{}".format(i) for i in self.spark_controller.get_manager_sessions_str()] + def _print_local_info(self): + sessions_info = [" {}".format(i) for i in self.spark_controller.get_manager_sessions_str()] print("""Info for running Spark: Sessions: {} Session configs: {} -""".format("\n".join(sessions_info), conf.session_configs())) +""".format("\n".join(sessions_info), self.properties)) + def _print_endpoint_info(self, info_sessions): + sessions_info = [" {}".format(i) for i in info_sessions] + print("""Info for endpoint: + Sessions: +{} +""".format("\n".join(sessions_info))) @staticmethod def _get_livy_kind(language): diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py index 5668901..df09e52 100644 --- a/remotespark/utils/configuration.py +++ b/remotespark/utils/configuration.py @@ -36,23 +36,16 @@ def 
load(fsrw_class = None): overrides = {} else: overrides = json.loads(line) - override_all(overrides) + override(overrides) -def override_all(obj): +def override(obj): """Given a dictionary representing the overrided defaults for this configuration, initialize the global configuration.""" global _overrides _overrides = obj -def override(config, value): - """Given a string representing a configuration and a value for that configuration, - override the configuration. Initialize the overrided configuration beforehand.""" - initialize() - _overrides[config] = value - - def _override(f): """A decorator which first initializes the overrided configurations, then checks the global overrided defaults for the given configuration,
Manage livy endpoint from magics This will be the API: * `%spark add session_name language conn_string` will create a session against the endpoint specified * `%spark info` will display the info for the sessions created in that notebook * `%spark config <configuration_overrides>` will add session configs for subsequent sessions * `%spark info conn_string` will list the sessions for a given livy endpoint by providing `session_id, language, state` * `%spark delete session_name` will delete a session by its name from the notebook that created it * `%spark delete conn_string session_id` will delete a session for a given endpoint by its id * `%spark cleanup` will delete all sessions created by the notebook * `%spark cleanup conn_string` will delete all session for the given livy endpoint This covers #56, #75, and #76 for magics in Python kernel. We are not designing the API for the wrapper kernels here and we'll tackle that as a separate improvement. ping @msftristew @ellisonbg to take a look when they can
jupyter-incubator/sparkmagic
diff --git a/tests/test_clientmanager.py b/tests/test_clientmanager.py index 400fadd..e088b35 100644 --- a/tests/test_clientmanager.py +++ b/tests/test_clientmanager.py @@ -30,8 +30,8 @@ def test_deserialize_on_creation(): def test_serialize_periodically(): - conf.override_all({conf.serialize_period_seconds.__name__: 0.1, - conf.serialize_periodically.__name__: True}) + conf.override({conf.serialize_period_seconds.__name__: 0.1, + conf.serialize_periodically.__name__: True}) serializer = MagicMock() ClientManager(serializer) diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 8cdac08..92fd020 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -27,7 +27,7 @@ def test_configuration_initialize(): @with_setup(_setup) def test_configuration_initialize_lazy(): """Tests that the initialize function has no behavior if the override dict is already initialized""" - conf.override_all({}) + conf.override({}) fsrw_class = MagicMock(side_effect=ValueError) conf.initialize(fsrw_class) @@ -56,7 +56,7 @@ def test_configuration_load_not_lazy(): read_lines = MagicMock(return_value=[json.dumps(config)]) fsrw.read_lines = read_lines fsrw_class = MagicMock(return_value=fsrw) - conf.override_all({conf.default_chart_type.__name__: "bar"}) + conf.override({ conf.default_chart_type.__name__: "bar" }) conf.load(fsrw_class) assert conf._overrides is not None assert_equals(conf._overrides, config) @@ -65,21 +65,9 @@ def test_configuration_load_not_lazy(): @with_setup(_setup) def test_configuration_override(): - kpc = { 'username': 'U', 'password': 'P', 'url': 'L' } - overrides = { conf.kernel_python_credentials.__name__: kpc } - conf.override_all(overrides) - conf.override(conf.execute_timeout_seconds.__name__, 1) - assert_equals(conf._overrides, { conf.kernel_python_credentials.__name__: kpc, - conf.execute_timeout_seconds.__name__: 1 }) - assert_equals(conf.execute_timeout_seconds(), 1) - assert_equals(conf.kernel_python_credentials(), kpc) 
- - -@with_setup(_setup) -def test_configuration_override_all(): z = 1500 config = { conf.status_sleep_seconds.__name__: z } - conf.override_all(config) + conf.override(config) assert_equals(conf._overrides, config) assert_equals(conf.status_sleep_seconds(), z) @@ -88,7 +76,7 @@ def test_configuration_override_all(): def test_configuration_decorator(): def test_f(): return 0 - conf.override_all({test_f.__name__: -1}) + conf.override({test_f.__name__: -1}) test_f_decorated = conf._override(test_f) assert_not_equals(test_f_decorated(), test_f()) assert_equals(test_f_decorated(), -1) \ No newline at end of file diff --git a/tests/test_livysession.py b/tests/test_livysession.py index 1706786..d406532 100644 --- a/tests/test_livysession.py +++ b/tests/test_livysession.py @@ -19,6 +19,7 @@ class DummyResponse: def json(self): return json.loads(self._json_text) + @property def status_code(self): return self._status_code @@ -59,7 +60,7 @@ class TestLivySession: @raises(AssertionError) def test_constructor_throws_status_sleep_seconds(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 0, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -69,7 +70,7 @@ class TestLivySession: @raises(AssertionError) def test_constructor_throws_statement_sleep_seconds(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 3, "statement_sleep_seconds": 0, "create_sql_context_timeout_seconds": 60 @@ -79,7 +80,7 @@ class TestLivySession: @raises(AssertionError) def test_constructor_throws_sql_create_timeout_seconds(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 0 @@ -89,7 +90,7 @@ class TestLivySession: @raises(ValueError) def test_constructor_throws_invalid_session_sql_combo(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 2, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -98,7 +99,7 @@ class 
TestLivySession: conf.load() def test_constructor_starts_with_existing_session(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -111,7 +112,7 @@ class TestLivySession: assert session.started_sql_context def test_constructor_starts_with_no_session(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -123,7 +124,7 @@ class TestLivySession: assert not session.started_sql_context def test_is_final_status(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -141,7 +142,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -160,7 +161,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -179,7 +180,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -196,7 +197,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) http_client.get.return_value = DummyResponse(200, self.ready_sessions_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -217,12 +218,12 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 
0.01, "statement_sleep_seconds": 0.01 }) session = self._create_session(http_client=http_client) - conf.override_all({}) + conf.override({}) session.start() @@ -240,7 +241,7 @@ class TestLivySession: DummyResponse(200, self.error_sessions_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.011, "statement_sleep_seconds": 6000 }) @@ -260,7 +261,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.011, "statement_sleep_seconds": 6000 }) @@ -274,7 +275,7 @@ class TestLivySession: def test_delete_session_when_active(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -290,7 +291,7 @@ class TestLivySession: def test_delete_session_when_not_started(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -306,7 +307,7 @@ class TestLivySession: def test_delete_session_when_dead_throws(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -325,7 +326,7 @@ class TestLivySession: self.get_responses = [DummyResponse(200, self.running_statement_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -355,7 +356,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json), DummyResponse(200, 
self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -391,7 +392,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -420,7 +421,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -448,7 +449,7 @@ class TestLivySession: DummyResponse(200, self.running_statement_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -466,7 +467,7 @@ class TestLivySession: http_client = MagicMock() http_client.connection_string = connection_string kind = Constants.session_kind_spark - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py index eaffb2e..e8f2d8f 100644 --- a/tests/test_remotesparkmagics.py +++ b/tests/test_remotesparkmagics.py @@ -14,7 +14,7 @@ shell = None def _setup(): global magic, spark_controller, shell - conf.override_all({}) + conf.override({}) shell = MagicMock() magic = RemoteSparkMagics(shell=None) @@ -31,7 +31,7 @@ def _teardown(): @with_setup(_setup, _teardown) def test_info_command_parses(): print_info_mock = MagicMock() - magic._print_info = print_info_mock + magic._print_local_info = print_info_mock command = "info" magic.spark(command) @@ -39,6 +39,18 @@ def 
test_info_command_parses(): print_info_mock.assert_called_once_with() +@with_setup(_setup, _teardown) +def test_info_endpoint_command_parses(): + print_info_mock = MagicMock() + magic._print_endpoint_info = print_info_mock + command = "info conn_str" + spark_controller.get_all_sessions_endpoint_info = MagicMock(return_value=None) + + magic.spark(command) + + print_info_mock.assert_called_once_with(None) + + @with_setup(_setup, _teardown) def test_add_sessions_command_parses(): # Do not skip and python @@ -70,9 +82,8 @@ def test_add_sessions_command_parses(): @with_setup(_setup, _teardown) def test_add_sessions_command_extra_properties(): - conf.override_all({}) magic.spark("config {\"extra\": \"yes\"}") - assert conf.session_configs() == {"extra": "yes"} + assert magic.properties == {"extra": "yes"} add_sessions_mock = MagicMock() spark_controller.add_session = add_sessions_mock @@ -85,7 +96,6 @@ def test_add_sessions_command_extra_properties(): magic.spark(line) add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "spark", "extra": "yes"}) - conf.load() @with_setup(_setup, _teardown) @@ -101,6 +111,17 @@ def test_delete_sessions_command_parses(): mock_method.assert_called_once_with(name) +@with_setup(_setup, _teardown) +def test_delete_sessions_command_parses(): + mock_method = MagicMock() + spark_controller.delete_session_by_id = mock_method + line = "delete conn_str 7" + + magic.spark(line) + + mock_method.assert_called_once_with("conn_str", "7") + + @with_setup(_setup, _teardown) def test_cleanup_command_parses(): mock_method = MagicMock() @@ -112,6 +133,17 @@ def test_cleanup_command_parses(): mock_method.assert_called_once_with() +@with_setup(_setup, _teardown) +def test_cleanup_endpoint_command_parses(): + mock_method = MagicMock() + spark_controller.cleanup_endpoint = mock_method + line = "cleanup conn_str" + + magic.spark(line) + + mock_method.assert_called_once_with("conn_str") + + @raises(ValueError) @with_setup(_setup, 
_teardown) def test_bad_command_throws_exception(): diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py index 4b825d0..e522404 100644 --- a/tests/test_sparkcontroller.py +++ b/tests/test_sparkcontroller.py @@ -1,5 +1,6 @@ from mock import MagicMock from nose.tools import with_setup +import json from remotespark.livyclientlib.sparkcontroller import SparkController @@ -8,6 +9,19 @@ client_factory = None controller = None +class DummyResponse: + def __init__(self, status_code, json_text): + self._status_code = status_code + self._json_text = json_text + + def json(self): + return json.loads(self._json_text) + + @property + def status_code(self): + return self._status_code + + def _setup(): global client_manager, client_factory, controller @@ -78,7 +92,7 @@ def test_cleanup(): def test_run_cell(): default_client = MagicMock() chosen_client = MagicMock() - default_client.execute = chosen_client.execute = MagicMock(return_value=(True,"")) + default_client.execute = chosen_client.execute = MagicMock(return_value=(True, "")) client_manager.get_any_client = MagicMock(return_value=default_client) client_manager.get_client = MagicMock(return_value=chosen_client) name = "session_name" @@ -102,7 +116,63 @@ def test_run_cell(): controller.run_cell_hive(cell, None) default_client.execute_hive.assert_called_with(cell) + @with_setup(_setup, _teardown) def test_get_client_keys(): controller.get_client_keys() client_manager.get_sessions_list.assert_called_once_with() + + +@with_setup(_setup, _teardown) +def test_get_all_sessions(): + http_client = MagicMock() + http_client.get.return_value = DummyResponse(200, '{"from":0,"total":2,"sessions":[{"id":0,"state":"idle","kind":' + '"spark","log":[""]}, {"id":1,"state":"busy","kind":"spark","log"' + ':[""]}]}') + client_factory.create_http_client.return_value = http_client + + sessions = controller.get_all_sessions_endpoint("conn_str") + + assert len(sessions) == 2 + + +@with_setup(_setup, _teardown) +def 
test_cleanup_endpoint(): + s0 = MagicMock() + s1 = MagicMock() + controller.get_all_sessions_endpoint = MagicMock(return_value=[s0, s1]) + + controller.cleanup_endpoint("conn_str") + + s0.delete.assert_called_once_with() + s1.delete.assert_called_once_with() + + +@with_setup(_setup, _teardown) +def test_delete_session_by_id_existent(): + http_client = MagicMock() + http_client.get.return_value = DummyResponse(200, '{"id":0,"state":"starting","kind":"spark","log":[]}') + client_factory.create_http_client.return_value = http_client + session = MagicMock() + create_session_method = MagicMock(return_value=session) + client_factory.create_session = create_session_method + + controller.delete_session_by_id("conn_str", "0") + + create_session_method.assert_called_once_with("conn_str", {"kind": "spark"}, "0", False) + session.delete.assert_called_once_with() + + +@with_setup(_setup, _teardown) +def test_delete_session_by_id_non_existent(): + http_client = MagicMock() + http_client.get.return_value = DummyResponse(404, '') + client_factory.create_http_client.return_value = http_client + session = MagicMock() + create_session_method = MagicMock(return_value=session) + client_factory.create_session = create_session_method + + controller.delete_session_by_id("conn_str", "0") + + assert len(create_session_method.mock_calls) == 0 + assert len(session.delete.mock_calls) == 0 diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py index 01bf903..15a9355 100644 --- a/tests/test_sparkkernelbase.py +++ b/tests/test_sparkkernelbase.py @@ -54,7 +54,7 @@ def test_get_config(): url = "url" config = {"kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url}} - conf.override_all(config) + conf.override(config) u, p, r = kernel._get_configuration() @@ -67,7 +67,7 @@ def test_get_config(): @with_setup(_setup, _teardown) def test_get_config_not_set(): - conf.override_all({}) + conf.override({}) try: kernel._get_configuration()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 5 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 narwhals==1.32.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==6.0.1 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@7025cb5b607abde80d5dd8a701c40c3598801e9d#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - narwhals==1.32.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==6.0.1 - pluggy==1.5.0 - 
prometheus-client==0.21.1 - ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_clientmanager.py::test_serialize_periodically", "tests/test_configuration.py::test_configuration_initialize_lazy", "tests/test_configuration.py::test_configuration_load_not_lazy", "tests/test_configuration.py::test_configuration_override", "tests/test_configuration.py::test_configuration_decorator", "tests/test_sparkkernelbase.py::test_get_config", "tests/test_sparkkernelbase.py::test_get_config_not_set" ]
[ "tests/test_configuration.py::test_configuration_initialize", "tests/test_remotesparkmagics.py::test_info_command_parses", "tests/test_remotesparkmagics.py::test_info_endpoint_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties", "tests/test_remotesparkmagics.py::test_delete_sessions_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_endpoint_command_parses", "tests/test_remotesparkmagics.py::test_bad_command_throws_exception", "tests/test_remotesparkmagics.py::test_run_cell_command_parses", "tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err", "tests/test_remotesparkmagics.py::test_run_sql_command_parses", "tests/test_remotesparkmagics.py::test_run_hive_command_parses", "tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns", "tests/test_sparkcontroller.py::test_add_session", "tests/test_sparkcontroller.py::test_add_session_skip", "tests/test_sparkcontroller.py::test_delete_session", "tests/test_sparkcontroller.py::test_cleanup", "tests/test_sparkcontroller.py::test_run_cell", "tests/test_sparkcontroller.py::test_get_client_keys", "tests/test_sparkcontroller.py::test_get_all_sessions", "tests/test_sparkcontroller.py::test_cleanup_endpoint", "tests/test_sparkcontroller.py::test_delete_session_by_id_existent", "tests/test_sparkcontroller.py::test_delete_session_by_id_non_existent", "tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run", "tests/test_sparkkernelbase.py::test_call_spark", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened", 
"tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution", "tests/test_sparkkernelbase.py::test_call_spark_sql_new_line", "tests/test_sparkkernelbase.py::test_call_spark_hive_new_line" ]
[ "tests/test_clientmanager.py::test_get_client_throws_when_client_not_exists", "tests/test_clientmanager.py::test_deserialize_on_creation", "tests/test_clientmanager.py::test_get_client", "tests/test_clientmanager.py::test_delete_client", "tests/test_clientmanager.py::test_delete_client_throws_when_client_not_exists", "tests/test_clientmanager.py::test_add_client_throws_when_client_exists", "tests/test_clientmanager.py::test_client_names_returned", "tests/test_clientmanager.py::test_get_any_client", "tests/test_clientmanager.py::test_get_any_client_raises_exception_with_no_client", "tests/test_clientmanager.py::test_get_any_client_raises_exception_with_two_clients", "tests/test_clientmanager.py::test_clean_up", "tests/test_clientmanager.py::test_clean_up_serializer", "tests/test_configuration.py::test_configuration_load", "tests/test_sparkkernelbase.py::test_initialize_magics", "tests/test_sparkkernelbase.py::test_start_session", "tests/test_sparkkernelbase.py::test_delete_session", "tests/test_sparkkernelbase.py::test_shutdown_cleans_up" ]
[ "tests/test_sparkkernelbase.py::test_set_config", "tests/test_sparkkernelbase.py::test_magic_not_supported" ]
Modified BSD License
349
jupyter-incubator__sparkmagic-94
a2ed74cab292c2a462d2080914c5b61ec064e448
2015-12-23 01:52:07
a2ed74cab292c2a462d2080914c5b61ec064e448
diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py index dda3e53..705954f 100644 --- a/remotespark/remotesparkmagics.py +++ b/remotespark/remotesparkmagics.py @@ -43,6 +43,8 @@ class RemoteSparkMagics(Magics): except KeyError: self.logger.error("Could not read env vars for serialization.") + self.properties = conf.session_configs() + self.logger.debug("Initialized spark magics.") @magic_arguments() @@ -120,7 +122,7 @@ class RemoteSparkMagics(Magics): elif subcommand == "config": # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes... rest_of_line = user_input[7:] - conf.override(conf.session_configs.__name__, json.loads(rest_of_line)) + self.properties = json.loads(rest_of_line) # add elif subcommand == "add": if len(args.command) != 4 and len(args.command) != 5: @@ -135,7 +137,7 @@ class RemoteSparkMagics(Magics): else: skip = False - properties = copy.deepcopy(conf.session_configs()) + properties = copy.deepcopy(self.properties) properties["kind"] = self._get_livy_kind(language) self.spark_controller.add_session(name, connection_string, skip, properties) @@ -198,8 +200,7 @@ class RemoteSparkMagics(Magics): {} Session configs: {} -""".format("\n".join(sessions_info), conf.session_configs())) - +""".format("\n".join(sessions_info), self.properties)) def _print_endpoint_info(self, info_sessions): sessions_info = [" {}".format(i) for i in info_sessions] diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py index 8adbb03..7b700fd 100644 --- a/remotespark/sparkkernelbase.py +++ b/remotespark/sparkkernelbase.py @@ -13,6 +13,11 @@ class SparkKernelBase(IPythonKernel): config_command = "config" sql_command = "sql" hive_command = "hive" + info_command = "info" + delete_command = "delete" + clean_up_command = "cleanup" + + force_flag = "f" def __init__(self, implementation, implementation_version, language, language_version, language_info, kernel_conf_name, session_language, client_name, 
**kwargs): @@ -61,7 +66,7 @@ class SparkKernelBase(IPythonKernel): restart_session = False if self.session_started: - if "f" not in flags: + if self.force_flag not in flags: self._show_user_error("A session has already been started. In order to modify the Spark configura" "tion, please provide the '-f' flag at the beginning of the config magic:\n" "\te.g. `%config -f {}`\n\nNote that this will kill the current session and" @@ -76,6 +81,33 @@ class SparkKernelBase(IPythonKernel): return self._run_restarting_session(code_to_run, silent, store_history, user_expressions, allow_stdin, restart_session) + elif subcommand == self.info_command: + code_to_run = "%spark info {}".format(self.connection_string) + return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin) + elif subcommand == self.delete_command: + if self.force_flag not in flags: + self._show_user_error("The session you are trying to delete could be this kernel's session. In order " + "to delete this session, please provide the '-f' flag at the beginning of the " + "delete magic:\n\te.g. `%delete -f id`\n\nAll previously run commands in the " + "session will be lost.") + code_to_run = "" + else: + self.session_started = False + code_to_run = "%spark delete {} {}".format(self.connection_string, code_to_run) + + return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin) + elif subcommand == self.clean_up_command: + if self.force_flag not in flags: + self._show_user_error("The sessions you are trying to delete could be this kernel's session or other " + "people's sessions. In order to delete them, please provide the '-f' flag at the " + "beginning of the cleanup magic:\n\te.g. 
`%cleanup -f`\n\nAll previously run " + "commands in the sessions will be lost.") + code_to_run = "" + else: + self.session_started = False + code_to_run = "%spark cleanup {}".format(self.connection_string) + + return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin) else: self._show_user_error("Magic '{}' not supported.".format(subcommand)) return self._run_without_session("", silent, store_history, user_expressions, allow_stdin) @@ -153,14 +185,21 @@ class SparkKernelBase(IPythonKernel): split_code = code.split(None, 1) subcommand = split_code[0].lower() flags = [] - rest = split_code[1] + if len(split_code) > 1: + rest = split_code[1] + else: + rest = "" # Get all flags flag_split = rest.split(None, 1) - while len(flag_split) >= 2 and flag_split[0].startswith("-"): - flags.append(flag_split[0][1:].lower()) - rest = flag_split[1] - flag_split = rest.split(None, 1) + while len(flag_split) >= 1 and flag_split[0].startswith("-"): + if len(flag_split) >= 2: + flags.append(flag_split[0][1:].lower()) + rest = flag_split[1] + flag_split = rest.split(None, 1) + if len(flag_split) == 1: + flags.append(flag_split[0][1:].lower()) + flag_split = [""] # flags to lower flags = [i.lower() for i in flags] diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py index 5668901..df09e52 100644 --- a/remotespark/utils/configuration.py +++ b/remotespark/utils/configuration.py @@ -36,23 +36,16 @@ def load(fsrw_class = None): overrides = {} else: overrides = json.loads(line) - override_all(overrides) + override(overrides) -def override_all(obj): +def override(obj): """Given a dictionary representing the overrided defaults for this configuration, initialize the global configuration.""" global _overrides _overrides = obj -def override(config, value): - """Given a string representing a configuration and a value for that configuration, - override the configuration. 
Initialize the overrided configuration beforehand.""" - initialize() - _overrides[config] = value - - def _override(f): """A decorator which first initializes the overrided configurations, then checks the global overrided defaults for the given configuration,
Expose livy endpoint management through wrapper kernel
jupyter-incubator/sparkmagic
diff --git a/tests/test_clientmanager.py b/tests/test_clientmanager.py index 400fadd..e088b35 100644 --- a/tests/test_clientmanager.py +++ b/tests/test_clientmanager.py @@ -30,8 +30,8 @@ def test_deserialize_on_creation(): def test_serialize_periodically(): - conf.override_all({conf.serialize_period_seconds.__name__: 0.1, - conf.serialize_periodically.__name__: True}) + conf.override({conf.serialize_period_seconds.__name__: 0.1, + conf.serialize_periodically.__name__: True}) serializer = MagicMock() ClientManager(serializer) diff --git a/tests/test_configuration.py b/tests/test_configuration.py index 8cdac08..92fd020 100644 --- a/tests/test_configuration.py +++ b/tests/test_configuration.py @@ -27,7 +27,7 @@ def test_configuration_initialize(): @with_setup(_setup) def test_configuration_initialize_lazy(): """Tests that the initialize function has no behavior if the override dict is already initialized""" - conf.override_all({}) + conf.override({}) fsrw_class = MagicMock(side_effect=ValueError) conf.initialize(fsrw_class) @@ -56,7 +56,7 @@ def test_configuration_load_not_lazy(): read_lines = MagicMock(return_value=[json.dumps(config)]) fsrw.read_lines = read_lines fsrw_class = MagicMock(return_value=fsrw) - conf.override_all({conf.default_chart_type.__name__: "bar"}) + conf.override({ conf.default_chart_type.__name__: "bar" }) conf.load(fsrw_class) assert conf._overrides is not None assert_equals(conf._overrides, config) @@ -65,21 +65,9 @@ def test_configuration_load_not_lazy(): @with_setup(_setup) def test_configuration_override(): - kpc = { 'username': 'U', 'password': 'P', 'url': 'L' } - overrides = { conf.kernel_python_credentials.__name__: kpc } - conf.override_all(overrides) - conf.override(conf.execute_timeout_seconds.__name__, 1) - assert_equals(conf._overrides, { conf.kernel_python_credentials.__name__: kpc, - conf.execute_timeout_seconds.__name__: 1 }) - assert_equals(conf.execute_timeout_seconds(), 1) - assert_equals(conf.kernel_python_credentials(), kpc) 
- - -@with_setup(_setup) -def test_configuration_override_all(): z = 1500 config = { conf.status_sleep_seconds.__name__: z } - conf.override_all(config) + conf.override(config) assert_equals(conf._overrides, config) assert_equals(conf.status_sleep_seconds(), z) @@ -88,7 +76,7 @@ def test_configuration_override_all(): def test_configuration_decorator(): def test_f(): return 0 - conf.override_all({test_f.__name__: -1}) + conf.override({test_f.__name__: -1}) test_f_decorated = conf._override(test_f) assert_not_equals(test_f_decorated(), test_f()) assert_equals(test_f_decorated(), -1) \ No newline at end of file diff --git a/tests/test_livysession.py b/tests/test_livysession.py index da9168f..d406532 100644 --- a/tests/test_livysession.py +++ b/tests/test_livysession.py @@ -60,7 +60,7 @@ class TestLivySession: @raises(AssertionError) def test_constructor_throws_status_sleep_seconds(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 0, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -70,7 +70,7 @@ class TestLivySession: @raises(AssertionError) def test_constructor_throws_statement_sleep_seconds(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 3, "statement_sleep_seconds": 0, "create_sql_context_timeout_seconds": 60 @@ -80,7 +80,7 @@ class TestLivySession: @raises(AssertionError) def test_constructor_throws_sql_create_timeout_seconds(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 0 @@ -90,7 +90,7 @@ class TestLivySession: @raises(ValueError) def test_constructor_throws_invalid_session_sql_combo(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 2, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -99,7 +99,7 @@ class TestLivySession: conf.load() def test_constructor_starts_with_existing_session(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 4, 
"statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -112,7 +112,7 @@ class TestLivySession: assert session.started_sql_context def test_constructor_starts_with_no_session(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 4, "statement_sleep_seconds": 2, "create_sql_context_timeout_seconds": 60 @@ -124,7 +124,7 @@ class TestLivySession: assert not session.started_sql_context def test_is_final_status(self): - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -142,7 +142,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -161,7 +161,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -180,7 +180,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -197,7 +197,7 @@ class TestLivySession: http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) http_client.get.return_value = DummyResponse(200, self.ready_sessions_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -218,12 +218,12 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) session = self._create_session(http_client=http_client) - conf.override_all({}) + conf.override({}) 
session.start() @@ -241,7 +241,7 @@ class TestLivySession: DummyResponse(200, self.error_sessions_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.011, "statement_sleep_seconds": 6000 }) @@ -261,7 +261,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.011, "statement_sleep_seconds": 6000 }) @@ -275,7 +275,7 @@ class TestLivySession: def test_delete_session_when_active(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -291,7 +291,7 @@ class TestLivySession: def test_delete_session_when_not_started(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -307,7 +307,7 @@ class TestLivySession: def test_delete_session_when_dead_throws(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -326,7 +326,7 @@ class TestLivySession: self.get_responses = [DummyResponse(200, self.running_statement_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -356,7 +356,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, 
"statement_sleep_seconds": 0.01 }) @@ -392,7 +392,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -421,7 +421,7 @@ class TestLivySession: DummyResponse(200, self.ready_sessions_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -449,7 +449,7 @@ class TestLivySession: DummyResponse(200, self.running_statement_json), DummyResponse(200, self.ready_statement_json)] http_client.get.side_effect = self._next_response_get - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) @@ -467,7 +467,7 @@ class TestLivySession: http_client = MagicMock() http_client.connection_string = connection_string kind = Constants.session_kind_spark - conf.override_all({ + conf.override({ "status_sleep_seconds": 0.01, "statement_sleep_seconds": 0.01 }) diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py index b8f966f..e8f2d8f 100644 --- a/tests/test_remotesparkmagics.py +++ b/tests/test_remotesparkmagics.py @@ -14,7 +14,7 @@ shell = None def _setup(): global magic, spark_controller, shell - conf.override_all({}) + conf.override({}) shell = MagicMock() magic = RemoteSparkMagics(shell=None) @@ -82,9 +82,8 @@ def test_add_sessions_command_parses(): @with_setup(_setup, _teardown) def test_add_sessions_command_extra_properties(): - conf.override_all({}) magic.spark("config {\"extra\": \"yes\"}") - assert conf.session_configs() == {"extra": "yes"} + assert magic.properties == {"extra": "yes"} add_sessions_mock = MagicMock() spark_controller.add_session = add_sessions_mock @@ -97,7 +96,6 @@ def 
test_add_sessions_command_extra_properties(): magic.spark(line) add_sessions_mock.assert_called_once_with(name, connection_string, False, {"kind": "spark", "extra": "yes"}) - conf.load() @with_setup(_setup, _teardown) diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py index 01bf903..78d43d4 100644 --- a/tests/test_sparkkernelbase.py +++ b/tests/test_sparkkernelbase.py @@ -54,7 +54,7 @@ def test_get_config(): url = "url" config = {"kernel_python_credentials": {user_ev: usr, pass_ev: pwd, url_ev: url}} - conf.override_all(config) + conf.override(config) u, p, r = kernel._get_configuration() @@ -67,7 +67,7 @@ def test_get_config(): @with_setup(_setup, _teardown) def test_get_config_not_set(): - conf.override_all({}) + conf.override({}) try: kernel._get_configuration() @@ -172,6 +172,82 @@ def test_magic_not_supported(): kernel.do_execute(code, False) +@with_setup(_setup, _teardown) +def test_info(): + code = "%info" + + # Call method + kernel.do_execute(code, False) + + # Assertions + assert not kernel.session_started + assert call("%spark info {}".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls + + +@with_setup(_setup, _teardown) +def test_delete_force(): + code = "%delete -f 9" + kernel.session_started = True + user_error = MagicMock() + kernel._show_user_error = user_error + + # Call method + kernel.do_execute(code, False) + + # Assertions + assert not kernel.session_started + assert call("%spark delete {} 9".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls + assert len(user_error.mock_calls) == 0 + + +@with_setup(_setup, _teardown) +def test_delete_not_force(): + code = "%delete 9" + kernel.session_started = True + user_error = MagicMock() + kernel._show_user_error = user_error + + # Call method + kernel.do_execute(code, False) + + # Assertions + assert kernel.session_started + assert not call("%spark delete {} 9".format(conn_str), False, True, None, False) in 
execute_cell_mock.mock_calls + assert len(user_error.mock_calls) == 1 + + +@with_setup(_setup, _teardown) +def test_cleanup_force(): + code = "%cleanup -f" + kernel.session_started = True + user_error = MagicMock() + kernel._show_user_error = user_error + + # Call method + kernel.do_execute(code, False) + + # Assertions + assert not kernel.session_started + assert call("%spark cleanup {}".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls + assert len(user_error.mock_calls) == 0 + + +@with_setup(_setup, _teardown) +def test_cleanup_not_force(): + code = "%cleanup" + kernel.session_started = True + user_error = MagicMock() + kernel._show_user_error = user_error + + # Call method + kernel.do_execute(code, False) + + # Assertions + assert kernel.session_started + assert not call("%spark cleanup {}".format(conn_str), False, True, None, False) in execute_cell_mock.mock_calls + assert len(user_error.mock_calls) == 1 + + @with_setup(_setup, _teardown) def test_call_spark(): # Set up
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 3, "test_score": 2 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "mkdir ~/.sparkmagic", "cp remotespark/default_config.json ~/.sparkmagic/config.json" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 narwhals==1.32.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==6.0.1 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@a2ed74cab292c2a462d2080914c5b61ec064e448#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - narwhals==1.32.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==6.0.1 - pluggy==1.5.0 - 
prometheus-client==0.21.1 - ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_clientmanager.py::test_serialize_periodically", "tests/test_configuration.py::test_configuration_initialize_lazy", "tests/test_configuration.py::test_configuration_load_not_lazy", "tests/test_configuration.py::test_configuration_override", "tests/test_configuration.py::test_configuration_decorator", "tests/test_sparkkernelbase.py::test_get_config", "tests/test_sparkkernelbase.py::test_get_config_not_set" ]
[ "tests/test_configuration.py::test_configuration_initialize", "tests/test_remotesparkmagics.py::test_info_command_parses", "tests/test_remotesparkmagics.py::test_info_endpoint_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties", "tests/test_remotesparkmagics.py::test_delete_sessions_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_endpoint_command_parses", "tests/test_remotesparkmagics.py::test_bad_command_throws_exception", "tests/test_remotesparkmagics.py::test_run_cell_command_parses", "tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err", "tests/test_remotesparkmagics.py::test_run_sql_command_parses", "tests/test_remotesparkmagics.py::test_run_hive_command_parses", "tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns", "tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run", "tests/test_sparkkernelbase.py::test_info", "tests/test_sparkkernelbase.py::test_delete_force", "tests/test_sparkkernelbase.py::test_cleanup_force", "tests/test_sparkkernelbase.py::test_cleanup_not_force", "tests/test_sparkkernelbase.py::test_call_spark", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution", "tests/test_sparkkernelbase.py::test_call_spark_sql_new_line", "tests/test_sparkkernelbase.py::test_call_spark_hive_new_line" ]
[ "tests/test_clientmanager.py::test_get_client_throws_when_client_not_exists", "tests/test_clientmanager.py::test_deserialize_on_creation", "tests/test_clientmanager.py::test_get_client", "tests/test_clientmanager.py::test_delete_client", "tests/test_clientmanager.py::test_delete_client_throws_when_client_not_exists", "tests/test_clientmanager.py::test_add_client_throws_when_client_exists", "tests/test_clientmanager.py::test_client_names_returned", "tests/test_clientmanager.py::test_get_any_client", "tests/test_clientmanager.py::test_get_any_client_raises_exception_with_no_client", "tests/test_clientmanager.py::test_get_any_client_raises_exception_with_two_clients", "tests/test_clientmanager.py::test_clean_up", "tests/test_clientmanager.py::test_clean_up_serializer", "tests/test_configuration.py::test_configuration_load", "tests/test_sparkkernelbase.py::test_initialize_magics", "tests/test_sparkkernelbase.py::test_start_session", "tests/test_sparkkernelbase.py::test_delete_session", "tests/test_sparkkernelbase.py::test_shutdown_cleans_up" ]
[ "tests/test_sparkkernelbase.py::test_set_config", "tests/test_sparkkernelbase.py::test_magic_not_supported", "tests/test_sparkkernelbase.py::test_delete_not_force" ]
Modified BSD License
350
mogproject__color-ssh-11
9adb19916b0205fd6a88beddcd8669114edc449c
2015-12-23 11:42:42
8ef23299ceb4e19e5d33562edb0066686eead51d
diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py index b794fd4..df9144c 100644 --- a/src/color_ssh/__init__.py +++ b/src/color_ssh/__init__.py @@ -1,1 +1,1 @@ -__version__ = '0.1.0' +__version__ = '0.1.1' diff --git a/src/color_ssh/color_cat.py b/src/color_ssh/color_cat.py index a9d1412..036ced8 100644 --- a/src/color_ssh/color_cat.py +++ b/src/color_ssh/color_cat.py @@ -68,8 +68,9 @@ def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout), """ setting = Setting().parse_args(argv, stdout) - # Note: Do not use 'fileinput' module because it causes a buffering problem. - try: + @exception_handler(lambda e: stderr.write(('%s: %s\n' % (e.__class__.__name__, e)).encode('utf-8', 'ignore'))) + def f(): + # Note: Do not use 'fileinput' module because it causes a buffering problem. for path in setting.paths: fh = stdin if path is None else io.open(path, 'rb', 0) try: @@ -79,9 +80,6 @@ def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout), finally: if fh is not stdin: fh.close() + return 0 - except Exception as e: - stderr.write(('%s: %s\n' % (e.__class__.__name__, e)).encode('utf-8', 'ignore')) - return 1 - - return 0 + return f() diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py index f8d82d8..1d2e798 100644 --- a/src/color_ssh/color_ssh.py +++ b/src/color_ssh/color_ssh.py @@ -106,7 +106,12 @@ def run_task(args): prefix = ['color-cat', '-l', label] - try: + def exc_func(e): + msg = '%s: %s\nlabel=%s, command=%s\n' % (e.__class__.__name__, e, label, command) + stderr.write(msg.encode('utf-8', 'ignore')) + + @exception_handler(exc_func) + def f(): proc_stdout = subprocess.Popen(prefix, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr) proc_stderr = subprocess.Popen(prefix + ['-s', '+'], stdin=subprocess.PIPE, stdout=stderr, stderr=stderr) ret = subprocess.call(command, stdin=None, stdout=proc_stdout.stdin, stderr=proc_stderr.stdin) @@ -116,11 +121,9 @@ def run_task(args): 
proc_stdout.wait() proc_stderr.wait() - except Exception as e: - msg = '%s: %s\nlabel=%s, command=%s\n' % (e.__class__.__name__, e, label, command) - stderr.write(msg.encode('utf-8', 'ignore')) - return 1 - return ret + return ret + + return f() def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr)): @@ -128,7 +131,8 @@ def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr) Main function """ - try: + @exception_handler(lambda e: stderr.write(('%s: %s\n' % (e.__class__.__name__, e)).encode('utf-8', 'ignore'))) + def f(): setting = Setting().parse_args(argv, stdout) n = min(len(setting.tasks), setting.parallelism) if n <= 1: @@ -136,9 +140,6 @@ def main(argv=sys.argv, stdout=io2bytes(sys.stdout), stderr=io2bytes(sys.stderr) else: pool = Pool(n) ret = pool.map(run_task, setting.tasks) - except Exception as e: - msg = '%s: %s\n' % (e.__class__.__name__, e) - stderr.write(msg.encode('utf-8', 'ignore')) - return 1 + return max(ret) - return max(ret) + return f() diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py index e1cdb8a..8bdea63 100644 --- a/src/color_ssh/util/util.py +++ b/src/color_ssh/util/util.py @@ -3,7 +3,7 @@ from __future__ import division, print_function, absolute_import, unicode_litera import sys import os -__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute'] +__all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute', 'exception_handler'] PY3 = sys.version_info >= (3,) @@ -37,3 +37,24 @@ def distribute(num_workers, tasks): ret.append(tasks[j:j + k]) j += k return ret + + +# +# Decorators +# +def exception_handler(exception_func): + def f(func): + import functools + + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except KeyboardInterrupt: + return 130 + except Exception as e: + exception_func(e) + return 1 + + return wrapper + return f
Kill all processes when interrupted by keyboard
mogproject/color-ssh
diff --git a/tests/color_ssh/util/__init__.py b/tests/color_ssh/util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/color_ssh/util/test_util.py b/tests/color_ssh/util/test_util.py new file mode 100644 index 0000000..50082dc --- /dev/null +++ b/tests/color_ssh/util/test_util.py @@ -0,0 +1,13 @@ +from __future__ import division, print_function, absolute_import, unicode_literals + +from mog_commons.unittest import TestCase +from color_ssh.util.util import exception_handler + + +class TestUtil(TestCase): + def test_exception_handler(self): + @exception_handler(lambda e: e) + def f(): + raise KeyboardInterrupt + + self.assertEqual(f(), 130)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 4 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pep8", "coverage", "six", "mog-commons" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 -e git+https://github.com/mogproject/color-ssh.git@9adb19916b0205fd6a88beddcd8669114edc449c#egg=color_ssh coverage==6.2 importlib-metadata==4.8.3 iniconfig==1.1.1 Jinja2==3.0.3 MarkupSafe==2.0.1 mog-commons==0.2.3 packaging==21.3 pep8==1.7.1 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: color-ssh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - coverage==6.2 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - jinja2==3.0.3 - markupsafe==2.0.1 - mog-commons==0.2.3 - packaging==21.3 - pep8==1.7.1 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/color-ssh
[ "tests/color_ssh/util/test_util.py::TestUtil::test_exception_handler" ]
[]
[]
[]
null
351
mogproject__color-ssh-12
b39783565319ee50b34988c29193f02a90122e2c
2015-12-26 06:06:56
8ef23299ceb4e19e5d33562edb0066686eead51d
diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py index df9144c..10939f0 100644 --- a/src/color_ssh/__init__.py +++ b/src/color_ssh/__init__.py @@ -1,1 +1,1 @@ -__version__ = '0.1.1' +__version__ = '0.1.2' diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py index 1d2e798..e575962 100644 --- a/src/color_ssh/color_ssh.py +++ b/src/color_ssh/color_ssh.py @@ -4,6 +4,7 @@ import sys import io import shlex import subprocess +import re from optparse import OptionParser from multiprocessing.pool import Pool from color_ssh.util.util import * @@ -64,20 +65,25 @@ class Setting(object): stdout.write(arg2bytes(parser.format_help().encode('utf-8'))) parser.exit(2) - prefix = shlex.split(option.ssh) - if not hosts: hosts = args[:1] del args[0] - # distribute args + # parse hosts + parsed_hosts = [self._parse_host(h) for h in hosts] + + tasks = [] if option.distribute: + # distribute args dist_prefix = shlex.split(option.distribute) d = distribute(len(hosts), args) - tasks = [(option.label or self._extract_label(host), - prefix + [host] + dist_prefix + d[i]) for i, host in enumerate(hosts) if d[i]] + for i, (user, host, port) in enumerate(parsed_hosts): + if d[i]: + label = option.label or host + tasks.append((label, self._ssh_args(option.ssh, user, host, port) + dist_prefix + d[i])) else: - tasks = [(option.label or self._extract_label(host), prefix + [host] + args) for host in hosts] + for user, host, port in parsed_hosts: + tasks.append((option.label or host, self._ssh_args(option.ssh, user, host, port) + args)) self.parallelism = option.parallelism self.tasks = tasks @@ -93,8 +99,20 @@ class Setting(object): return list(filter(lambda x: x, (line.strip() for line in lines))) @staticmethod - def _extract_label(host): - return host.rsplit('@', 1)[-1] + def _parse_host(s): + """ + :param s: string : [user@]host[:port] + :return: tuple of (user, host, port) + """ + ret = re.match('^(?:([^:@]+)@)?([^:@]+)(?::(\d+))?$', s) + if not ret: + raise 
ValueError('Illegal format: %s' % s) + return ret.groups() + + @staticmethod + def _ssh_args(ssh_cmd, user, host, port): + user_host = [('' if user is None else '%s@' % user) + host] + return shlex.split(ssh_cmd) + ([] if port is None else ['-p', port]) + user_host def run_task(args):
Support port option in host list and host string
mogproject/color-ssh
diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py index 67684f8..e2fc45a 100644 --- a/tests/color_ssh/test_color_ssh.py +++ b/tests/color_ssh/test_color_ssh.py @@ -62,14 +62,15 @@ class TestSetting(TestCase): ('server-4', ['ssh', 'server-4', 'pwd']), ('server-5', ['ssh', 'server-5', 'pwd']), ('server-6', ['ssh', 'server-6', 'pwd']), - ('server-7', ['ssh', 'server-7', 'pwd']), - ('server-8', ['ssh', 'server-8', 'pwd']), + ('server-7', ['ssh', '-p', '22', 'server-7', 'pwd']), + ('server-8', ['ssh', '-p', '1022', 'server-8', 'pwd']), ('server-9', ['ssh', 'root@server-9', 'pwd']), - ('server-10', ['ssh', 'root@server-10', 'pwd']), + ('server-10', ['ssh', '-p', '1022', 'root@server-10', 'pwd']), ]) - self._check(self._parse(['-H', 'server-11 root@server-12', 'pwd']), [ + self._check(self._parse(['-H', 'server-11 root@server-12 root@server-13:1022', 'pwd']), [ ('server-11', ['ssh', 'server-11', 'pwd']), ('server-12', ['ssh', 'root@server-12', 'pwd']), + ('server-13', ['ssh', '-p', '1022', 'root@server-13', 'pwd']), ]) self._check(self._parse(['--hosts', hosts_path, '--host', 'server-11 root@server-12', 'pwd']), [ ('server-1', ['ssh', 'server-1', 'pwd']), @@ -78,10 +79,10 @@ class TestSetting(TestCase): ('server-4', ['ssh', 'server-4', 'pwd']), ('server-5', ['ssh', 'server-5', 'pwd']), ('server-6', ['ssh', 'server-6', 'pwd']), - ('server-7', ['ssh', 'server-7', 'pwd']), - ('server-8', ['ssh', 'server-8', 'pwd']), + ('server-7', ['ssh', '-p', '22', 'server-7', 'pwd']), + ('server-8', ['ssh', '-p', '1022', 'server-8', 'pwd']), ('server-9', ['ssh', 'root@server-9', 'pwd']), - ('server-10', ['ssh', 'root@server-10', 'pwd']), + ('server-10', ['ssh', '-p', '1022', 'root@server-10', 'pwd']), ('server-11', ['ssh', 'server-11', 'pwd']), ('server-12', ['ssh', 'root@server-12', 'pwd']), ]) @@ -103,6 +104,16 @@ class TestSetting(TestCase): self.assertSystemExit(2, Setting().parse_args, ['color-ssh', '--label', 'x'], out) self.assertSystemExit(2, 
Setting().parse_args, ['color-ssh', '--host', ' ', 'pwd'], out) + def test_parse_host_error(self): + self.assertRaises(ValueError, Setting._parse_host, '') + self.assertRaises(ValueError, Setting._parse_host, '@') + self.assertRaises(ValueError, Setting._parse_host, ':') + self.assertRaises(ValueError, Setting._parse_host, 'a:') + self.assertRaises(ValueError, Setting._parse_host, 'a:b') + self.assertRaises(ValueError, Setting._parse_host, '@a:0') + self.assertRaises(ValueError, Setting._parse_host, 'a:b@c:0') + self.assertRaises(ValueError, Setting._parse_host, 'a@@c:0') + class TestMain(TestCase): def test_main_single_proc(self): diff --git a/tests/resources/test_color_ssh_hosts.txt b/tests/resources/test_color_ssh_hosts.txt index 3b9d2dd..f91ff9f 100644 --- a/tests/resources/test_color_ssh_hosts.txt +++ b/tests/resources/test_color_ssh_hosts.txt @@ -4,7 +4,7 @@ server-3 server-4 server-5 server-6 -server-7 -server-8 +server-7:22 +server-8:1022 root@server-9 -root@server-10 \ No newline at end of file +root@server-10:1022 \ No newline at end of file
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 2 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "six", "mog-commons>=0.2.2" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/mogproject/color-ssh.git@b39783565319ee50b34988c29193f02a90122e2c#egg=color_ssh coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 mog-commons==0.2.3 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 six==1.17.0 tomli==2.2.1
name: color-ssh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - mog-commons==0.2.3 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/color-ssh
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args", "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_host_error" ]
[]
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error" ]
[]
null
352
mogproject__color-ssh-18
4c8d1949137759046ddff3b922eb84148c6ffa19
2015-12-27 13:52:04
8ef23299ceb4e19e5d33562edb0066686eead51d
diff --git a/src/color_ssh/__init__.py b/src/color_ssh/__init__.py index 8ce9b36..7525d19 100644 --- a/src/color_ssh/__init__.py +++ b/src/color_ssh/__init__.py @@ -1,1 +1,1 @@ -__version__ = '0.1.3' +__version__ = '0.1.4' diff --git a/src/color_ssh/color_cat.py b/src/color_ssh/color_cat.py index 036ced8..7509d49 100644 --- a/src/color_ssh/color_cat.py +++ b/src/color_ssh/color_cat.py @@ -75,7 +75,7 @@ def main(argv=sys.argv, stdin=io2bytes(sys.stdin), stdout=io2bytes(sys.stdout), fh = stdin if path is None else io.open(path, 'rb', 0) try: for line in iter(fh.readline, b''): - stdout.write(setting.prefix + line + RESET) + stdout.write(setting.prefix + line.rstrip(b'\n') + RESET + b'\n') stdout.flush() finally: if fh is not stdin: diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py index 6cfbb2d..5aaac08 100644 --- a/src/color_ssh/color_ssh.py +++ b/src/color_ssh/color_ssh.py @@ -131,7 +131,7 @@ class Setting(object): def _build_host_string(user, host): ret = host if user: - ret = '%s@' % user + ret + ret = str('%s@') % user + ret return ret @staticmethod diff --git a/src/color_ssh/util/util.py b/src/color_ssh/util/util.py index 8bdea63..ed7e846 100644 --- a/src/color_ssh/util/util.py +++ b/src/color_ssh/util/util.py @@ -2,6 +2,7 @@ from __future__ import division, print_function, absolute_import, unicode_litera import sys import os +import errno __all__ = ['PY3', 'arg2bytes', 'io2bytes', 'distribute', 'exception_handler'] @@ -52,6 +53,12 @@ def exception_handler(exception_func): return func(*args, **kwargs) except KeyboardInterrupt: return 130 + except IOError as e: + if e.errno == errno.EPIPE: + return 0 + else: + exception_func(e) + return 1 except Exception as e: exception_func(e) return 1
Reset color before newline
mogproject/color-ssh
diff --git a/tests/color_ssh/test_color_cat.py b/tests/color_ssh/test_color_cat.py index 390d0b0..38d50cc 100644 --- a/tests/color_ssh/test_color_cat.py +++ b/tests/color_ssh/test_color_cat.py @@ -110,8 +110,8 @@ class TestMain(TestCase): ret = color_cat.main(args, stdout=out, stderr=err) self.assertEqual(ret, 0) self.assertEqual(out.getvalue(), - b'\x1b[31mfoo\n\x1b[0m\x1b[31mbar\n\x1b[0m\x1b[31mbaz\n\x1b[0m' - b'\x1b[31m123\n\x1b[0m\x1b[31m456\n\x1b[0m\x1b[31m789\n\x1b[0m') + b'\x1b[31mfoo\x1b[0m\n\x1b[31mbar\x1b[0m\n\x1b[31mbaz\x1b[0m\n' + b'\x1b[31m123\x1b[0m\n\x1b[31m456\x1b[0m\n\x1b[31m789\x1b[0m\n') self.assertEqual(err.getvalue(), b'') def test_main_error(self): diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py index 524e613..d035868 100644 --- a/tests/color_ssh/test_color_ssh.py +++ b/tests/color_ssh/test_color_ssh.py @@ -98,9 +98,10 @@ class TestSetting(TestCase): ]) # upload - self._check(self._parse([ + result = self._parse([ '-H', 'server-11 root@server-12', '--distribute', 'echo "foo bar"', '--upload', 'dir1/x', 'dir1/y', 'z' - ]), [ + ]) + self._check(result, [ ('server-11', ['ssh', 'server-11', 'echo', 'foo bar', 'dir1/x', 'dir1/y'], [ ['ssh', 'server-11', 'mkdir', '-p', 'dir1'], ['scp', 'dir1/x', 'server-11:dir1/x'], @@ -109,6 +110,10 @@ class TestSetting(TestCase): ('server-12', ['ssh', 'root@server-12', 'echo', 'foo bar', 'z'], [['scp', 'z', 'root@server-12:z']]), ]) + for _, cmd, setup in result.tasks: + self.assertTrue(all(isinstance(c, str) for c in cmd)) + for xs in setup: + self.assertTrue(all(isinstance(c, str) for c in xs)) def test_parse_args_error(self): with self.withBytesOutput() as (out, err): @@ -132,10 +137,10 @@ class TestMain(TestCase): def test_main_single_proc(self): # requires: POSIX environment, color-cat command def f(bs): - return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' + return 
b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n' def g(bs): - return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' + return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n' with self.__with_temp_output() as (out, err): args = ['color-ssh', '--ssh', str('bash'), @@ -152,10 +157,10 @@ class TestMain(TestCase): def test_main_multi_proc(self): # requires: POSIX environment, color-cat command def f(bs): - return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' + return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m|\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n' def g(bs): - return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\n\x1b[0m' + return b'\x1b[7m\x1b[35mtests/resources/test_color_ssh_01.sh\x1b[0m+\x1b[0m\x1b[35m' + bs + b'\x1b[0m\n' with self.__with_temp_output() as (out, err): path = os.path.join('tests', 'resources', 'test_color_ssh_01.sh') @@ -199,6 +204,18 @@ class TestMain(TestCase): self.assertEqual(out.read(), b'') self.assertTrue(b'No such file or directory' in err.read()) + def test_run_task_error(self): + with self.__with_temp_output() as (out, err): + ret = color_ssh.run_task(('lab', 'echo x', ['true', 'false'])) + self.assertEqual(ret, 1) + + out.seek(0) + err.seek(0) + + self.assertEqual(out.read(), b'') + self.assertEqual(b'RuntimeError: Failed to execute setup command: false\nlabel=lab, command=echo x\n', + err.read()) + @staticmethod @contextmanager def __with_temp_output(): diff --git a/tests/color_ssh/util/test_util.py b/tests/color_ssh/util/test_util.py index 03f9fc3..9eb76ff 100644 --- a/tests/color_ssh/util/test_util.py +++ b/tests/color_ssh/util/test_util.py @@ -12,6 +12,12 @@ class TestUtil(TestCase): self.assertEqual(f(), 130) + @exception_handler(lambda e: e) + def g(): + raise IOError(32, '') + + 
self.assertEqual(g(), 0) + def test_distribute(self): self.assertEqual(distribute(0, []), []) self.assertEqual(distribute(0, ['a']), [])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 4 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "six", "mog-commons>=0.2.2" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/mogproject/color-ssh.git@4c8d1949137759046ddff3b922eb84148c6ffa19#egg=color_ssh coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 mog-commons==0.2.3 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 six==1.17.0 tomli==2.2.1
name: color-ssh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - mog-commons==0.2.3 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/color-ssh
[ "tests/color_ssh/test_color_cat.py::TestMain::test_main", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc", "tests/color_ssh/util/test_util.py::TestUtil::test_exception_handler" ]
[ "tests/color_ssh/test_color_cat.py::TestSetting::test_parse_args_error" ]
[ "tests/color_ssh/test_color_cat.py::TestSetting::test_parse_args", "tests/color_ssh/test_color_cat.py::TestMain::test_main_error", "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args", "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error", "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_host_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_run_task_error", "tests/color_ssh/util/test_util.py::TestUtil::test_distribute", "tests/color_ssh/util/test_util.py::TestUtil::test_distribute_error" ]
[]
null
353
sphinx-gallery__sphinx-gallery-80
b63d08d07784d6c67714af11b18f07406ee8bf66
2015-12-29 15:30:04
c1dbb14a7cc7ab8460a55999cffb5a1d90e9ab14
Titan-C: Changes visible in my read the docs fork http://sphinx-gallery-local.readthedocs.org/en/cssclean/ Where no difference shall be noticed
diff --git a/sphinx_gallery/_static/gallery.css b/sphinx_gallery/_static/gallery.css index 623003e..1fe2424 100644 --- a/sphinx_gallery/_static/gallery.css +++ b/sphinx_gallery/_static/gallery.css @@ -1,106 +1,106 @@ -div.sphx-glr-thumbContainer { +.sphx-glr-thumbcontainer { + background: #fff; + border: solid #fff 1px; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + border-radius: 5px; box-shadow: none; - background: #FFF; + float: left; margin: 5px; - padding-top: 5px; min-height: 230px; - border: solid white 1px; + padding-top: 5px; + position: relative; +} +.sphx-glr-thumbcontainer:hover { + border: solid #b4ddfc 1px; + box-shadow: 0 0 15px rgba(142, 176, 202, 0.5); +} +.sphx-glr-thumbcontainer a.internal { + bottom: 0; + display: block; + left: 0; + padding: 150px 10px 0; + position: absolute; + right: 0; + top: 0; +} +.sphx-glr-thumbcontainer p { + margin: 0 0 .1em 0; +} +.sphx-glr-thumbcontainer .figure { + margin: 10px; + width: 160px; +} +.sphx-glr-thumbcontainer img { + display: inline; + max-height: 160px; + max-width: 100%; +} +.sphx-glr-thumbcontainer[tooltip]:hover:after { + background: rgba(0, 0, 0, 0.8); -webkit-border-radius: 5px; -moz-border-radius: 5px; border-radius: 5px; - float: left; - position: relative; } - div.sphx-glr-thumbContainer:hover { - box-shadow: 0 0 15px rgba(142, 176, 202, 0.5); - border: solid #B4DDFC 1px; } - div.sphx-glr-thumbContainer a.internal { - display: block; - position: absolute; - padding: 150px 10px 0px 10px; - top: 0px; - right: 0px; - bottom: 0px; - left: 0px; } - div.sphx-glr-thumbContainer p { - margin: 0 0 .1em 0; } - div.sphx-glr-thumbContainer .figure { - margin: 10px; - width: 160px; } - div.sphx-glr-thumbContainer img { - max-width: 100%; - max-height: 160px; - display: inline; } - div.sphx-glr-thumbContainer[tooltip]:hover:after { - background: rgba(0, 0, 0, 0.8); - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - border-radius: 5px; - color: white; - content: attr(tooltip); - left: 95%; 
- padding: 5px 15px; - position: absolute; - z-index: 98; - width: 220px; - bottom: 52%; } - div.sphx-glr-thumbContainer[tooltip]:hover:before { - content: ""; - position: absolute; - z-index: 99; - border: solid; - border-color: #333 transparent; - border-width: 18px 0px 0px 20px; - left: 85%; - bottom: 58%; } - -div.sphx-glr-script-out div.highlight { - background-color: transparent; + color: #fff; + content: attr(tooltip); + left: 95%; + padding: 5px 15px; + position: absolute; + z-index: 98; + width: 220px; + bottom: 52%; } - -p.sphx-glr-script-out { - margin: -.9ex 0ex; - color: #888; +.sphx-glr-thumbcontainer[tooltip]:hover:before { + border: solid; + border-color: #333 transparent; + border-width: 18px 0 0 20px; + bottom: 58%; + content: ''; + left: 85%; + position: absolute; + z-index: 99; } +.sphx-glr-script-out { + color: #888; + margin: 0; +} +.sphx-glr-script-out .highlight { + background-color: transparent; +} .sphx-glr-script-out pre { - overflow: auto; - word-break: break-word; - max-height: 30em; - background-color: #FAFAE2; - border: none; + background-color: #fafae2; + border: 0; margin-left: 1ex; - margin-top: 0px; + margin-top: 0; + max-height: 30em; + overflow: auto; padding-left: 1ex; + word-break: break-word; } - -p.sphx-glr-horizontal { - margin-bottom: 0px; -} - -/* Paragraph following an output are a bit more indented */ -blockquote.sphx-glr-script-out+p { - margin-top: 1.8em; +.sphx-glr-script-out + p { + margin-top: 1.8em; } -div.sphx-glr-download { - padding: 1ex; - margin: 1em auto 1ex auto; - border-radius: 4px; - max-width: 45ex; - background-color: #ffc; - border: 1px solid #C2C22D; +.sphx-glr-download { + background-color: #ffc; + border: 1px solid #c2c22d; + border-radius: 4px; + margin: 1em auto 1ex auto; + max-width: 45ex; + padding: 1ex; } - -div.sphx-glr-download a { - color: #4B4600; +.sphx-glr-download a { + color: #4b4600; } ul.sphx-glr-horizontal { - padding: 0px; - list-style: none; } - ul.sphx-glr-horizontal li { - 
display: inline; } - ul.sphx-glr-horizontal img { - height: auto !important; } - -/*# sourceMappingURL=gallery.css.map */ + list-style: none; + padding: 0; +} +ul.sphx-glr-horizontal li { + display: inline; +} +ul.sphx-glr-horizontal img { + height: auto !important; +} diff --git a/sphinx_gallery/_static/gallery.scss b/sphinx_gallery/_static/gallery.scss deleted file mode 100644 index 03632ce..0000000 --- a/sphinx_gallery/_static/gallery.scss +++ /dev/null @@ -1,87 +0,0 @@ -div.sphx-glr-thumbContainer { - box-shadow: none; - background: #FFF; - margin: 5px; - padding-top: 5px; - min-height: 230px; - border: solid white 1px; - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - border-radius: 5px; - float:left; - position:relative; - &:hover { - box-shadow: 0 0 15px rgba(142, 176, 202, 0.5); - border: solid #B4DDFC 1px; - } - - a.internal { - display: block; - position: absolute; - padding: 150px 10px 0px 10px; - top: 0px; - right: 0px; - bottom: 0px; - left: 0px; - } - - p { - margin: 0 0 .1em 0; - } - - .figure { - margin: 10px; - width: 160px; - } - - img { - max-width: 100%; - max-height: 160px; - display: inline; - } - - &[tooltip] { - &:hover:after { - background: rgba(0,0,0,.8); - -webkit-border-radius: 5px; - -moz-border-radius: 5px; - border-radius: 5px; - color: white; - content: attr(tooltip); - left: 95%; - padding: 5px 15px; - position: absolute; - z-index: 98; - width: 220px; - bottom: 52%; - } - &:hover:before { - content: ""; - position: absolute; - z-index: 99; - border: solid; - border-color: #333 transparent; - border-width: 18px 0px 0px 20px; - left: 85%; - bottom: 58%; - } - } -} - -.sphx-glr-script-out pre { - overflow-y: auto; - max-height: 30em; -} - -ul.sphx-glr-horizontal { - padding: 0px; - list-style: none; - - li { - display: inline; - } - - img { - height: auto !important; - } -} diff --git a/sphinx_gallery/backreferences.py b/sphinx_gallery/backreferences.py index 4df5d3d..67aaca9 100644 --- a/sphinx_gallery/backreferences.py +++ 
b/sphinx_gallery/backreferences.py @@ -130,7 +130,7 @@ def scan_used_functions(example_file, gallery_conf): THUMBNAIL_TEMPLATE = """ .. raw:: html - <div class="sphx-glr-thumbContainer" tooltip="{snippet}"> + <div class="sphx-glr-thumbcontainer" tooltip="{snippet}"> .. only:: html diff --git a/sphinx_gallery/gen_rst.py b/sphinx_gallery/gen_rst.py index ea01e88..36b3bfd 100644 --- a/sphinx_gallery/gen_rst.py +++ b/sphinx_gallery/gen_rst.py @@ -438,14 +438,16 @@ def execute_script(code_block, example_globals, image_path, fig_count, # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. + image_list = "" if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') - else: + elif len(figure_list) > 1: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') + except Exception: figure_list = [] image_list = '%s is not compiling:' % src_file
The syle of the CSS needs sanitizing The CSS styles should be as such <pre> element { prop1: value; prop2: value; } element2 { prop: value; } </pre>
sphinx-gallery/sphinx-gallery
diff --git a/sphinx_gallery/tests/test_backreferences.py b/sphinx_gallery/tests/test_backreferences.py index ca1542b..d820378 100644 --- a/sphinx_gallery/tests/test_backreferences.py +++ b/sphinx_gallery/tests/test_backreferences.py @@ -17,7 +17,7 @@ def test_thumbnail_div(): reference = """ .. raw:: html - <div class="sphx-glr-thumbContainer" tooltip="test formating"> + <div class="sphx-glr-thumbcontainer" tooltip="test formating"> .. only:: html @@ -42,7 +42,7 @@ def test_backref_thumbnail_div(): reference = """ .. raw:: html - <div class="sphx-glr-thumbContainer" tooltip="test formating"> + <div class="sphx-glr-thumbcontainer" tooltip="test formating"> .. only:: html
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_removed_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 3 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 contourpy==1.3.0 coverage==7.8.0 cycler==0.12.1 docutils==0.21.2 exceptiongroup==1.2.2 fonttools==4.56.0 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 importlib_resources==6.5.2 iniconfig==2.1.0 Jinja2==3.1.6 kiwisolver==1.4.7 MarkupSafe==3.0.2 matplotlib==3.9.4 nose==1.3.7 numpy==2.0.2 packaging==24.2 pillow==11.1.0 pluggy==1.5.0 Pygments==2.19.1 pyparsing==3.2.3 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 requests==2.32.3 six==1.17.0 snowballstemmer==2.2.0 Sphinx==7.4.7 -e git+https://github.com/sphinx-gallery/sphinx-gallery.git@b63d08d07784d6c67714af11b18f07406ee8bf66#egg=sphinx_gallery sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 urllib3==2.3.0 zipp==3.21.0
name: sphinx-gallery channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - contourpy==1.3.0 - coverage==7.8.0 - cycler==0.12.1 - docutils==0.21.2 - exceptiongroup==1.2.2 - fonttools==4.56.0 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - importlib-resources==6.5.2 - iniconfig==2.1.0 - jinja2==3.1.6 - kiwisolver==1.4.7 - markupsafe==3.0.2 - matplotlib==3.9.4 - nose==1.3.7 - numpy==2.0.2 - packaging==24.2 - pillow==11.1.0 - pluggy==1.5.0 - pygments==2.19.1 - pyparsing==3.2.3 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - requests==2.32.3 - six==1.17.0 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/sphinx-gallery
[ "sphinx_gallery/tests/test_backreferences.py::test_thumbnail_div", "sphinx_gallery/tests/test_backreferences.py::test_backref_thumbnail_div" ]
[]
[]
[]
BSD 3-Clause "New" or "Revised" License
354
sympy__sympy-10331
b9802dd116bfd84c5a5f58e0b8ee90fb72ae21ae
2015-12-30 01:07:56
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/concrete/products.py b/sympy/concrete/products.py index 7ef8e02724..5ee924355b 100644 --- a/sympy/concrete/products.py +++ b/sympy/concrete/products.py @@ -75,7 +75,7 @@ class Product(ExprWithIntLimits): >>> Product(k**2,(k, 1, m)) Product(k**2, (k, 1, m)) >>> Product(k**2,(k, 1, m)).doit() - (factorial(m))**2 + factorial(m)**2 Wallis' product for pi: @@ -96,7 +96,7 @@ class Product(ExprWithIntLimits): Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n)) >>> W2e = W2.doit() >>> W2e - 2**(-2*n)*4**n*(factorial(n))**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n)) + 2**(-2*n)*4**n*factorial(n)**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n)) >>> limit(W2e, n, oo) pi/2 @@ -109,7 +109,7 @@ class Product(ExprWithIntLimits): pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2 >>> Pe = P.doit() >>> Pe - pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*(factorial(n))**2) + pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*factorial(n)**2) >>> Pe = Pe.rewrite(gamma) >>> Pe pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2) diff --git a/sympy/polys/fields.py b/sympy/polys/fields.py index df79cfa2ca..cb98de721e 100644 --- a/sympy/polys/fields.py +++ b/sympy/polys/fields.py @@ -63,7 +63,7 @@ def sfield(exprs, *symbols, **options): >>> K Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order >>> f - (4*x**2*exp(1/x) + x*exp(1/x)*log(x))/((x**(1/3))**5) + (4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5) """ single = False if not is_sequence(exprs): diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py index 01c84ae988..6d3496cbc8 100644 --- a/sympy/printing/latex.py +++ b/sympy/printing/latex.py @@ -423,11 +423,9 @@ def _print_Pow(self, expr): elif self._settings['fold_frac_powers'] \ and expr.exp.is_Rational \ and expr.exp.q != 1: - base, p, q = self._print(expr.base), expr.exp.p, expr.exp.q + base, p, q = 
self.parenthesize(expr.base, PRECEDENCE['Pow']), expr.exp.p, expr.exp.q if expr.base.is_Function: return self._print(expr.base, "%s/%s" % (p, q)) - if self._needs_brackets(expr.base): - return r"\left(%s\right)^{%s/%s}" % (base, p, q) return r"%s^{%s/%s}" % (base, p, q) elif expr.exp.is_Rational and expr.exp.is_negative and expr.base.is_commutative: # Things like 1/x @@ -446,12 +444,9 @@ def _print_Pow(self, expr): #the result comes with a minus and a space, so we remove if tex[:1] == "-": return tex[1:].strip() - if self._needs_brackets(expr.base): - tex = r"\left(%s\right)^{%s}" - else: - tex = r"%s^{%s}" + tex = r"%s^{%s}" - return tex % (self._print(expr.base), + return tex % (self.parenthesize(expr.base, PRECEDENCE['Pow']), self._print(expr.exp)) def _print_Sum(self, expr): @@ -777,18 +772,12 @@ def _print_Abs(self, expr, exp=None): _print_Determinant = _print_Abs def _print_re(self, expr, exp=None): - if self._needs_brackets(expr.args[0]): - tex = r"\Re {\left (%s \right )}" % self._print(expr.args[0]) - else: - tex = r"\Re{%s}" % self._print(expr.args[0]) + tex = r"\Re{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Func']) return self._do_exponent(tex, exp) def _print_im(self, expr, exp=None): - if self._needs_brackets(expr.args[0]): - tex = r"\Im {\left ( %s \right )}" % self._print(expr.args[0]) - else: - tex = r"\Im{%s}" % self._print(expr.args[0]) + tex = r"\Im{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Func']) return self._do_exponent(tex, exp) @@ -951,11 +940,7 @@ def _print_fresnelc(self, expr, exp=None): return r"C%s" % tex def _print_subfactorial(self, expr, exp=None): - x = expr.args[0] - if self._needs_brackets(x): - tex = r"!\left(%s\right)" % self._print(x) - else: - tex = "!" 
+ self._print(x) + tex = r"!%s" % self.parenthesize(expr.args[0], PRECEDENCE["Func"]) if exp is not None: return r"%s^{%s}" % (tex, exp) @@ -963,11 +948,7 @@ def _print_subfactorial(self, expr, exp=None): return tex def _print_factorial(self, expr, exp=None): - x = expr.args[0] - if self._needs_brackets(x): - tex = r"\left(%s\right)!" % self._print(x) - else: - tex = self._print(x) + "!" + tex = r"%s!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"]) if exp is not None: return r"%s^{%s}" % (tex, exp) @@ -975,11 +956,7 @@ def _print_factorial(self, expr, exp=None): return tex def _print_factorial2(self, expr, exp=None): - x = expr.args[0] - if self._needs_brackets(x): - tex = r"\left(%s\right)!!" % self._print(x) - else: - tex = self._print(x) + "!!" + tex = r"%s!!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"]) if exp is not None: return r"%s^{%s}" % (tex, exp) @@ -997,10 +974,7 @@ def _print_binomial(self, expr, exp=None): def _print_RisingFactorial(self, expr, exp=None): n, k = expr.args - if self._needs_brackets(n): - base = r"\left(%s\right)" % self._print(n) - else: - base = self._print(n) + base = r"%s" % self.parenthesize(n, PRECEDENCE['Func']) tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k)) @@ -1008,10 +982,7 @@ def _print_RisingFactorial(self, expr, exp=None): def _print_FallingFactorial(self, expr, exp=None): n, k = expr.args - if self._needs_brackets(k): - sub = r"\left(%s\right)" % self._print(k) - else: - sub = self._print(k) + sub = r"%s" % self.parenthesize(k, PRECEDENCE['Func']) tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub) diff --git a/sympy/printing/precedence.py b/sympy/printing/precedence.py index 1f4b3eb874..83e503dc21 100644 --- a/sympy/printing/precedence.py +++ b/sympy/printing/precedence.py @@ -14,6 +14,7 @@ "Add": 40, "Mul": 50, "Pow": 60, + "Func": 70, "Not": 100, "Atom": 1000 } @@ -32,8 +33,7 @@ "Relational": PRECEDENCE["Relational"], "Sub": PRECEDENCE["Add"], "Not": PRECEDENCE["Not"], - "factorial": 
PRECEDENCE["Pow"], - "factorial2": PRECEDENCE["Pow"], + "Function" : PRECEDENCE["Func"], "NegativeInfinity": PRECEDENCE["Add"], "MatAdd": PRECEDENCE["Add"], "MatMul": PRECEDENCE["Mul"], diff --git a/sympy/series/formal.py b/sympy/series/formal.py index b7c2ded401..950bea189f 100644 --- a/sympy/series/formal.py +++ b/sympy/series/formal.py @@ -489,7 +489,7 @@ def rsolve_hypergeometric(f, x, P, Q, k, m): >>> from sympy.abc import x, k >>> rh(exp(x), x, -S.One, (k + 1), k, 1) - (Piecewise((1/(factorial(k)), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) + (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) >>> rh(ln(1 + x), x, k**2, k*(k + 1), k, 1) (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), @@ -672,7 +672,7 @@ def solve_de(f, x, DE, order, g, k): >>> from sympy.abc import x, k, f >>> solve_de(exp(x), x, D(f(x), x) - f(x), 1, f, k) - (Piecewise((1/(factorial(k)), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) + (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) >>> solve_de(ln(1 + x), x, (x + 1)*D(f(x), x, 2) + D(f(x)), 2, f, k) (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1), @@ -717,7 +717,7 @@ def hyper_algorithm(f, x, k, order=4): >>> from sympy.abc import x, k >>> hyper_algorithm(exp(x), x, k) - (Piecewise((1/(factorial(k)), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) + (Piecewise((1/factorial(k), Eq(Mod(k, 1), 0)), (0, True)), 1, 1) >>> hyper_algorithm(ln(1 + x), x, k) (Piecewise(((-1)**(k - 1)*factorial(k - 1)/RisingFactorial(2, k - 1),
Replace needs_brackets with parenthesize in the latex printer The LaTeX printer has a `_needs_brackets` method, which checks if something needs parentheses. But there is already the parenthesize module, which works more generally. Uses of `_needs_brackets` should be replaced and `_needs_brackets` should be removed or deprecated. I've already replaced one instance of this over at https://github.com/sympy/sympy/pull/10084, but I didn't bother with the rest. This shouldn't be too hard to fix, as long as you have an understanding of how operator precedence works. And hopefully there should be sufficient test coverage to avoid breaking things, although whoever fixes this should probably check that each instance of `_needs_brackets` is tested to be sure.
sympy/sympy
diff --git a/sympy/physics/quantum/tests/test_printing.py b/sympy/physics/quantum/tests/test_printing.py index 7a393d79a2..db5382ed22 100644 --- a/sympy/physics/quantum/tests/test_printing.py +++ b/sympy/physics/quantum/tests/test_printing.py @@ -85,7 +85,7 @@ def test_anticommutator(): """) assert pretty(ac_tall) == ascii_str assert upretty(ac_tall) == ucode_str - assert latex(ac_tall) == r'\left\{\left(A\right)^{2},B\right\}' + assert latex(ac_tall) == r'\left\{A^{2},B\right\}' sT(ac_tall, "AntiCommutator(Pow(Operator(Symbol('A')), Integer(2)),Operator(Symbol('B')))") @@ -194,7 +194,7 @@ def test_commutator(): """) assert pretty(c_tall) == ascii_str assert upretty(c_tall) == ucode_str - assert latex(c_tall) == r'\left[\left(A\right)^{2},B\right]' + assert latex(c_tall) == r'\left[A^{2},B\right]' sT(c_tall, "Commutator(Pow(Operator(Symbol('A')), Integer(2)),Operator(Symbol('B')))") @@ -529,7 +529,7 @@ def test_operator(): """) assert pretty(inv) == ascii_str assert upretty(inv) == ucode_str - assert latex(inv) == r'\left(A\right)^{-1}' + assert latex(inv) == r'A^{-1}' sT(inv, "Pow(Operator(Symbol('A')), Integer(-1))") assert str(d) == 'DifferentialOperator(Derivative(f(x), x),f(x))' ascii_str = \ @@ -822,7 +822,7 @@ def test_big_expr(): assert pretty(e1) == ascii_str assert upretty(e1) == ucode_str assert latex(e1) == \ - r'{\left(J_z\right)^{2}}\otimes \left({A^{\dag} + B^{\dag}}\right) \left\{\left(DifferentialOperator\left(\frac{d}{d x} f{\left (x \right )},f{\left (x \right )}\right)^{\dag}\right)^{3},A^{\dag} + B^{\dag}\right\} \left({\left\langle 1,0\right|} + {\left\langle 1,1\right|}\right) \left({\left|0,0\right\rangle } + {\left|1,-1\right\rangle }\right)' + r'{J_z^{2}}\otimes \left({A^{\dag} + B^{\dag}}\right) \left\{\left(DifferentialOperator\left(\frac{d}{d x} f{\left (x \right )},f{\left (x \right )}\right)^{\dag}\right)^{3},A^{\dag} + B^{\dag}\right\} \left({\left\langle 1,0\right|} + {\left\langle 1,1\right|}\right) \left({\left|0,0\right\rangle } 
+ {\left|1,-1\right\rangle }\right)' sT(e1, "Mul(TensorProduct(Pow(JzOp(Symbol('J')), Integer(2)), Add(Dagger(Operator(Symbol('A'))), Dagger(Operator(Symbol('B'))))), AntiCommutator(Pow(Dagger(DifferentialOperator(Derivative(Function('f')(Symbol('x')), Symbol('x')),Function('f')(Symbol('x')))), Integer(3)),Add(Dagger(Operator(Symbol('A'))), Dagger(Operator(Symbol('B'))))), Add(JzBra(Integer(1),Integer(0)), JzBra(Integer(1),Integer(1))), Add(JzKet(Integer(0),Integer(0)), JzKet(Integer(1),Integer(-1))))") assert str(e2) == '[Jz**2,A + B]*{E**(-2),Dagger(D)*Dagger(C)}*[J2,Jz]' ascii_str = \ @@ -840,7 +840,7 @@ def test_big_expr(): assert pretty(e2) == ascii_str assert upretty(e2) == ucode_str assert latex(e2) == \ - r'\left[\left(J_z\right)^{2},A + B\right] \left\{\left(E\right)^{-2},D^{\dag} C^{\dag}\right\} \left[J^2,J_z\right]' + r'\left[J_z^{2},A + B\right] \left\{E^{-2},D^{\dag} C^{\dag}\right\} \left[J^2,J_z\right]' sT(e2, "Mul(Commutator(Pow(JzOp(Symbol('J')), Integer(2)),Add(Operator(Symbol('A')), Operator(Symbol('B')))), AntiCommutator(Pow(Operator(Symbol('E')), Integer(-2)),Mul(Dagger(Operator(Symbol('D'))), Dagger(Operator(Symbol('C'))))), Commutator(J2Op(Symbol('J')),JzOp(Symbol('J'))))") assert str(e3) == \ "Wigner3j(1, 2, 3, 4, 5, 6)*[Dagger(B) + A,C + D]x(-J2 + Jz)*|1,0><1,1|*(|1,0,j1=1,j2=1> + |1,1,j1=1,j2=1>)x|1,-1,j1=1,j2=1>" diff --git a/sympy/physics/vector/tests/test_printing.py b/sympy/physics/vector/tests/test_printing.py index d9639339c2..84472a44d8 100644 --- a/sympy/physics/vector/tests/test_printing.py +++ b/sympy/physics/vector/tests/test_printing.py @@ -129,7 +129,7 @@ def test_vector_latex_with_functions(): v = omega.diff() ** alpha * N.x - assert v._latex() == (r'\left(\dot{\omega}\right)^{\alpha}' + assert v._latex() == (r'\dot{\omega}^{\alpha}' r'\mathbf{\hat{n}_x}') diff --git a/sympy/printing/tests/test_precedence.py b/sympy/printing/tests/test_precedence.py index 9b3e0dfaec..686b574039 100644 --- 
a/sympy/printing/tests/test_precedence.py +++ b/sympy/printing/tests/test_precedence.py @@ -19,9 +19,10 @@ def test_Add(): def test_Function(): - assert precedence(sin(x)) == PRECEDENCE["Atom"] - assert precedence(Derivative(x, y)) == PRECEDENCE["Atom"] + assert precedence(sin(x)) == PRECEDENCE["Func"] +def test_Derivative(): + assert precedence(Derivative(x, y)) == PRECEDENCE["Atom"] def test_Integral(): assert precedence(Integral(x, y)) == PRECEDENCE["Atom"]
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 5 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@b9802dd116bfd84c5a5f58e0b8ee90fb72ae21ae#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/physics/quantum/tests/test_printing.py::test_anticommutator", "sympy/physics/quantum/tests/test_printing.py::test_commutator", "sympy/physics/quantum/tests/test_printing.py::test_operator", "sympy/physics/quantum/tests/test_printing.py::test_big_expr", "sympy/physics/vector/tests/test_printing.py::test_vector_latex_with_functions", "sympy/printing/tests/test_precedence.py::test_Function" ]
[]
[ "sympy/physics/quantum/tests/test_printing.py::test_cg", "sympy/physics/quantum/tests/test_printing.py::test_constants", "sympy/physics/quantum/tests/test_printing.py::test_dagger", "sympy/physics/quantum/tests/test_printing.py::test_gate", "sympy/physics/quantum/tests/test_printing.py::test_hilbert", "sympy/physics/quantum/tests/test_printing.py::test_innerproduct", "sympy/physics/quantum/tests/test_printing.py::test_qexpr", "sympy/physics/quantum/tests/test_printing.py::test_qubit", "sympy/physics/quantum/tests/test_printing.py::test_spin", "sympy/physics/quantum/tests/test_printing.py::test_state", "sympy/physics/quantum/tests/test_printing.py::test_tensorproduct", "sympy/physics/vector/tests/test_printing.py::test_latex_printer", "sympy/physics/vector/tests/test_printing.py::test_vector_pretty_print", "sympy/physics/vector/tests/test_printing.py::test_vector_latex", "sympy/physics/vector/tests/test_printing.py::test_dyadic_pretty_print", "sympy/physics/vector/tests/test_printing.py::test_dyadic_latex", "sympy/printing/tests/test_precedence.py::test_Add", "sympy/printing/tests/test_precedence.py::test_Derivative", "sympy/printing/tests/test_precedence.py::test_Integral", "sympy/printing/tests/test_precedence.py::test_Mul", "sympy/printing/tests/test_precedence.py::test_Number", "sympy/printing/tests/test_precedence.py::test_Order", "sympy/printing/tests/test_precedence.py::test_Pow", "sympy/printing/tests/test_precedence.py::test_Product", "sympy/printing/tests/test_precedence.py::test_Relational", "sympy/printing/tests/test_precedence.py::test_Sum", "sympy/printing/tests/test_precedence.py::test_Symbol", "sympy/printing/tests/test_precedence.py::test_And_Or" ]
[]
BSD
356
mogproject__color-ssh-23
881561051ebae23a03aec3063877ee1cf21e24c0
2015-12-30 03:06:36
8ef23299ceb4e19e5d33562edb0066686eead51d
diff --git a/src/color_ssh/color_ssh.py b/src/color_ssh/color_ssh.py index 0534891..294a553 100644 --- a/src/color_ssh/color_ssh.py +++ b/src/color_ssh/color_ssh.py @@ -21,6 +21,9 @@ class Setting(object): ' %prog [options...] -H "[user@]hostname [[user@]hostname]...]" command' ]) DEFAULT_PARALLELISM = 32 + CMD_SSH = str('ssh') + CMD_UPLOAD = [str('rsync'), str('-a')] + CMD_MKDIR = [str('mkdir'), str('-p')] def __init__(self, parallelism=None, tasks=None): self.parallelism = parallelism @@ -39,7 +42,7 @@ class Setting(object): help='label name' ) parser.add_option( - '--ssh', dest='ssh', default=str('ssh'), type='string', metavar='SSH', + '--ssh', dest='ssh', default=self.CMD_SSH, type='string', metavar='SSH', help='override ssh command line string' ) parser.add_option( @@ -62,11 +65,16 @@ class Setting(object): '--upload', dest='upload', default=False, action='store_true', help='upload files before executing a command (all args are regarded as paths)' ) + parser.add_option( + '--upload-with', dest='upload_with', default=None, type='string', metavar='PATH', + help='file paths to be uploaded before executing a command' + ) option, args = parser.parse_args(argv[1:]) hosts = self._load_hosts(option.host_file) + (option.host_string.split() if option.host_string else []) if len(args) < (1 if hosts else 2): + print(option.__dict__) stdout.write(arg2bytes(parser.format_help().encode('utf-8'))) parser.exit(2) @@ -77,6 +85,9 @@ class Setting(object): # parse hosts parsed_hosts = [self._parse_host(h) for h in hosts] + # parse upload-with option + upload_with = [] if option.upload_with is None else shlex.split(option.upload_with) + tasks = [] if option.distribute: # distribute args @@ -84,24 +95,16 @@ class Setting(object): d = distribute(len(hosts), args) for i, (user, host, port) in enumerate(parsed_hosts): if d[i]: - setup_commands = [] - if option.upload: - # create directories - dirs = list(set(x for x in [os.path.dirname(arg) for arg in d[i]] if x != '' and x != '.')) - 
if dirs: - setup_commands.append( - self._ssh_args(option.ssh, user, host, port) + [str('mkdir'), str('-p')] + dirs - ) - - # upload files before executing main commands - setup_commands.extend([self._scp_args(str('rsync -a'), user, host, port, arg) for arg in d[i]]) - + upload_paths = upload_with + d[i] if option.upload else [] label = option.label or host ssh_args = self._ssh_args(option.ssh, user, host, port) - tasks.append((label, ssh_args + dist_prefix + d[i], setup_commands)) + tasks.append((label, ssh_args + dist_prefix + d[i], + self._build_upload_commands(user, host, port, option.ssh, upload_paths))) else: for user, host, port in parsed_hosts: - tasks.append((option.label or host, self._ssh_args(option.ssh, user, host, port) + args, [])) + tasks.append((option.label or host, + self._ssh_args(option.ssh, user, host, port) + args, + self._build_upload_commands(user, host, port, option.ssh, upload_with))) self.parallelism = option.parallelism self.tasks = tasks @@ -140,12 +143,25 @@ class Setting(object): [] if port is None else [str('-p'), port]) + [Setting._build_host_string(user, host)] @staticmethod - def _scp_args(scp_cmd, user, host, port, path): - return shlex.split(scp_cmd) + ([] if port is None else [str('-P'), port]) + [ + def _upload_args(user, host, port, path): + return Setting.CMD_UPLOAD + ([] if port is None else [str('-P'), port]) + [ path, Setting._build_host_string(user, host) + str(':') + path ] + @staticmethod + def _build_upload_commands(user, host, port, ssh_cmd, paths): + # create directories + dirs = list(set(x for x in [os.path.dirname(path) for path in paths] if x != '' and x != '.')) + + ret = [] + if dirs: + ret.append(Setting._ssh_args(ssh_cmd, user, host, port) + Setting.CMD_MKDIR + sorted(dirs)) + + # upload files + ret.extend([Setting._upload_args(user, host, port, path) for path in paths]) + return ret + def run_task(args): label, command, setup_commands = args
Add upload-with option
mogproject/color-ssh
diff --git a/tests/color_ssh/test_color_ssh.py b/tests/color_ssh/test_color_ssh.py index 2c22fee..ccb7c34 100644 --- a/tests/color_ssh/test_color_ssh.py +++ b/tests/color_ssh/test_color_ssh.py @@ -115,6 +115,35 @@ class TestSetting(TestCase): [['rsync', '-a', 'z', 'root@server-12:z']]), ]) + # upload-with + self._check(self._parse(['--upload-with=dir1/x', 'server-1', 'pwd']), + [('server-1', ['ssh', 'server-1', 'pwd'], [ + ['ssh', 'server-1', 'mkdir', '-p', 'dir1'], + ['rsync', '-a', 'dir1/x', 'server-1:dir1/x'], + ])]) + + self._check( + self._parse([ + '--upload-with', 'dir2/c dir2/d dir3/e', + '-H', 'server-11 root@server-12', '--distribute', 'echo "foo bar"', '--upload', 'dir1/x', 'dir1/y', 'z' + ]), [ + ('server-11', ['ssh', 'server-11', 'echo', 'foo bar', 'dir1/x', 'dir1/y'], [ + ['ssh', 'server-11', 'mkdir', '-p', 'dir1', 'dir2', 'dir3'], + ['rsync', '-a', 'dir2/c', 'server-11:dir2/c'], + ['rsync', '-a', 'dir2/d', 'server-11:dir2/d'], + ['rsync', '-a', 'dir3/e', 'server-11:dir3/e'], + ['rsync', '-a', 'dir1/x', 'server-11:dir1/x'], + ['rsync', '-a', 'dir1/y', 'server-11:dir1/y'] + ]), + ('server-12', ['ssh', 'root@server-12', 'echo', 'foo bar', 'z'], [ + ['ssh', 'root@server-12', 'mkdir', '-p', 'dir2', 'dir3'], + ['rsync', '-a', 'dir2/c', 'root@server-12:dir2/c'], + ['rsync', '-a', 'dir2/d', 'root@server-12:dir2/d'], + ['rsync', '-a', 'dir3/e', 'root@server-12:dir3/e'], + ['rsync', '-a', 'z', 'root@server-12:z'] + ]), + ]) + def test_parse_args_error(self): with self.withBytesOutput() as (out, err): self.assertSystemExit(2, Setting().parse_args, ['color-ssh'], out)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 1 }
0.1
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "six", "mog-commons>=0.2.2" ], "pre_install": null, "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 -e git+https://github.com/mogproject/color-ssh.git@881561051ebae23a03aec3063877ee1cf21e24c0#egg=color_ssh coverage==6.2 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work Jinja2==3.0.3 MarkupSafe==2.0.1 mog-commons==0.2.3 more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 six==1.17.0 toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: color-ssh channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - jinja2==3.0.3 - markupsafe==2.0.1 - mog-commons==0.2.3 - pytest-cov==4.0.0 - six==1.17.0 - tomli==1.2.3 prefix: /opt/conda/envs/color-ssh
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args" ]
[]
[ "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_args_error", "tests/color_ssh/test_color_ssh.py::TestSetting::test_parse_host_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_load_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_multi_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_single_proc", "tests/color_ssh/test_color_ssh.py::TestMain::test_main_task_error", "tests/color_ssh/test_color_ssh.py::TestMain::test_run_task_error" ]
[]
null
357
sympy__sympy-10334
b9802dd116bfd84c5a5f58e0b8ee90fb72ae21ae
2015-12-30 07:21:27
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/integrals/risch.py b/sympy/integrals/risch.py index ed75b61794..9b8fea56e4 100644 --- a/sympy/integrals/risch.py +++ b/sympy/integrals/risch.py @@ -25,7 +25,7 @@ """ from __future__ import print_function, division -from sympy import real_roots, default_sort_key +from sympy import real_roots from sympy.abc import z from sympy.core.function import Lambda from sympy.core.numbers import ilcm, oo @@ -160,7 +160,7 @@ class DifferentialExtension(object): # to have a safeguard when debugging. __slots__ = ('f', 'x', 'T', 'D', 'fa', 'fd', 'Tfuncs', 'backsubs', 'E_K', 'E_args', 'L_K', 'L_args', 'cases', 'case', 't', 'd', 'newf', 'level', - 'ts',) + 'ts') def __init__(self, f=None, x=None, handle_first='log', dummy=True, extension=None, rewrite_complex=False): """ @@ -247,6 +247,7 @@ def update(seq, atoms, func): symlogs = set() while True: + restart = False if self.newf.is_rational_function(*self.T): break @@ -326,12 +327,6 @@ def update(seq, atoms, func): # ANSWER: Yes, otherwise we can't integrate x**x (or # rather prove that it has no elementary integral) # without first manually rewriting it as exp(x*log(x)) - self.newf = self.newf.xreplace({old: new}) - self.backsubs += [(new, old)] - log_new_extension = self._log_part([log(i.base)], - dummy=dummy) - exps = update(exps, self.newf.atoms(exp), lambda i: - i.exp.is_rational_function(*self.T) and i.exp.has(*self.T)) continue ans, u, const = A newterm = exp(i.exp*(log(const) + u)) @@ -382,7 +377,7 @@ def update(seq, atoms, func): self.backsubs.append((new, i)) # remove any duplicates - logs = sorted(set(logs), key=default_sort_key) + logs = list(set(logs)) if handle_first == 'exp' or not log_new_extension: exp_new_extension = self._exp_part(exps, dummy=dummy) @@ -745,7 +740,7 @@ def as_poly_1t(p, t, z): Examples ======== - >>> from sympy import random_poly + >>> from sympy import Symbol, random_poly >>> from sympy.integrals.risch import as_poly_1t >>> from sympy.abc import x, z @@ -1652,11 +1647,11 @@ def 
risch_integrate(f, x, extension=None, handle_first='log', >>> pprint(risch_integrate(x*x**x*log(x) + x**x + x*x**x, x)) x x*x - >>> pprint(risch_integrate(x**x, x)) + >>> pprint(risch_integrate(x**x*log(x), x)) / | | x - | x dx + | x *log(x) dx | / diff --git a/sympy/solvers/inequalities.py b/sympy/solvers/inequalities.py index 6baa44e38c..c73052a1a7 100644 --- a/sympy/solvers/inequalities.py +++ b/sympy/solvers/inequalities.py @@ -215,7 +215,7 @@ def reduce_rational_inequalities(exprs, gen, relational=True): """ exact = True eqs = [] - solution = S.EmptySet + solution = S.Reals if exprs else S.EmptySet for _exprs in exprs: _eqs = [] @@ -251,13 +251,15 @@ def reduce_rational_inequalities(exprs, gen, relational=True): if not (domain.is_ZZ or domain.is_QQ): expr = numer/denom expr = Relational(expr, 0, rel) - solution = Union(solution, solve_univariate_inequality(expr, gen, relational=False)) + solution &= solve_univariate_inequality(expr, gen, relational=False) else: _eqs.append(((numer, denom), rel)) - eqs.append(_eqs) + if _eqs: + eqs.append(_eqs) - solution = Union(solution, solve_rational_inequalities(eqs)) + if eqs: + solution &= solve_rational_inequalities(eqs) if not exact: solution = solution.evalf()
improper inequality reduction ```python >>> reduce_inequalities([x < oo, x >= 0, -oo < x]) And(-oo < x, x < oo) <--- wrong >>> And(*[x < oo, x >= 0, -oo < x]).as_set().as_relational(x) And(0 <= x, x < oo) <--- right ```
sympy/sympy
diff --git a/sympy/integrals/tests/test_risch.py b/sympy/integrals/tests/test_risch.py index a0ad2fe2ae..3258230ec1 100644 --- a/sympy/integrals/tests/test_risch.py +++ b/sympy/integrals/tests/test_risch.py @@ -461,11 +461,9 @@ def test_DifferentialExtension_log(): def test_DifferentialExtension_symlog(): - # See comment on test_risch_integrate below assert DifferentialExtension(log(x**x), x, dummy=False)._important_attrs == \ - (Poly(t0*x, t1), Poly(1, t1), [Poly(1, x), Poly(1/x, t0), Poly((t0 + - 1)*t1, t1)], [x, t0, t1], [Lambda(i, log(i)), Lambda(i, exp(i*t0))], - [(exp(x*log(x)), x**x)], [2], [t0*x], [1], [x]) + (Poly(x*t0, t0), Poly(1, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0], + [Lambda(i, log(i))], [(x*log(x), log(x**x))], [], [], [1], [x]) assert DifferentialExtension(log(x**y), x, dummy=False)._important_attrs == \ (Poly(y*t0, t0), Poly(1, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0], [Lambda(i, log(i))], [(y*log(x), log(x**y))], [], [], [1], [x]) @@ -646,14 +644,7 @@ def test_risch_integrate(): # These are tested here in addition to in test_DifferentialExtension above # (symlogs) to test that backsubs works correctly. The integrals should be # written in terms of the original logarithms in the integrands. - - # XXX: Unfortunately, making backsubs work on this one is a little - # trickier, because x**x is converted to exp(x*log(x)), and so log(x**x) - # is converted to x*log(x). (x**2*log(x)).subs(x*log(x), log(x**x)) is - # smart enough, the issue is that these splits happen at different places - # in the algorithm. 
Maybe a heuristic is in order - assert risch_integrate(log(x**x), x) == x**2*log(x)/2 - x**2/4 - + assert risch_integrate(log(x**x), x) == x*log(x**x)/2 - x**2/4 assert risch_integrate(log(x**y), x) == x*log(x**y) - x*y assert risch_integrate(log(sqrt(x)), x) == x*log(sqrt(x)) - x/2 @@ -667,8 +658,3 @@ def test_NonElementaryIntegral(): assert isinstance(risch_integrate(x**x*log(x), x), NonElementaryIntegral) # Make sure methods of Integral still give back a NonElementaryIntegral assert isinstance(NonElementaryIntegral(x**x*t0, x).subs(t0, log(x)), NonElementaryIntegral) - -def test_xtothex(): - a = risch_integrate(x**x, x) - assert a == NonElementaryIntegral(x**x, x) - assert isinstance(a, NonElementaryIntegral) diff --git a/sympy/solvers/tests/test_inequalities.py b/sympy/solvers/tests/test_inequalities.py index ef606209a4..5ab366e2cd 100644 --- a/sympy/solvers/tests/test_inequalities.py +++ b/sympy/solvers/tests/test_inequalities.py @@ -168,6 +168,10 @@ def test_reduce_rational_inequalities_real_relational(): relational=False) == \ Union(Interval.Lopen(-oo, -2), Interval.Lopen(0, 4)) + # issue sympy/sympy#10237 + assert reduce_rational_inequalities( + [[x < oo, x >= 0, -oo < x]], x, relational=False) == Interval(0, oo) + def test_reduce_abs_inequalities(): e = abs(x - 5) < 3
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 2 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 execnet==2.0.2 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 -e git+https://github.com/sympy/sympy.git@b9802dd116bfd84c5a5f58e0b8ee90fb72ae21ae#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - execnet==2.0.2 - mpmath==1.3.0 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 prefix: /opt/conda/envs/sympy
[ "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_symlog", "sympy/integrals/tests/test_risch.py::test_risch_integrate", "sympy/solvers/tests/test_inequalities.py::test_reduce_rational_inequalities_real_relational" ]
[]
[ "sympy/integrals/tests/test_risch.py::test_gcdex_diophantine", "sympy/integrals/tests/test_risch.py::test_frac_in", "sympy/integrals/tests/test_risch.py::test_as_poly_1t", "sympy/integrals/tests/test_risch.py::test_derivation", "sympy/integrals/tests/test_risch.py::test_splitfactor", "sympy/integrals/tests/test_risch.py::test_canonical_representation", "sympy/integrals/tests/test_risch.py::test_hermite_reduce", "sympy/integrals/tests/test_risch.py::test_polynomial_reduce", "sympy/integrals/tests/test_risch.py::test_laurent_series", "sympy/integrals/tests/test_risch.py::test_recognize_derivative", "sympy/integrals/tests/test_risch.py::test_recognize_log_derivative", "sympy/integrals/tests/test_risch.py::test_residue_reduce", "sympy/integrals/tests/test_risch.py::test_integrate_hyperexponential", "sympy/integrals/tests/test_risch.py::test_integrate_hyperexponential_polynomial", "sympy/integrals/tests/test_risch.py::test_integrate_hyperexponential_returns_piecewise", "sympy/integrals/tests/test_risch.py::test_integrate_primitive", "sympy/integrals/tests/test_risch.py::test_integrate_hypertangent_polynomial", "sympy/integrals/tests/test_risch.py::test_integrate_nonlinear_no_specials", "sympy/integrals/tests/test_risch.py::test_integer_powers", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_exp", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_log", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_handle_first", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_all_attrs", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_extension_flag", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_misc", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_Rothstein", "sympy/integrals/tests/test_risch.py::test_DecrementLevel", "sympy/integrals/tests/test_risch.py::test_risch_integrate_float", "sympy/integrals/tests/test_risch.py::test_NonElementaryIntegral", 
"sympy/solvers/tests/test_inequalities.py::test_solve_poly_inequality", "sympy/solvers/tests/test_inequalities.py::test_reduce_poly_inequalities_real_interval", "sympy/solvers/tests/test_inequalities.py::test_reduce_poly_inequalities_complex_relational", "sympy/solvers/tests/test_inequalities.py::test_reduce_abs_inequalities", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_general", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_boolean", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_multivariate", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_errors", "sympy/solvers/tests/test_inequalities.py::test_hacky_inequalities", "sympy/solvers/tests/test_inequalities.py::test_issue_6343", "sympy/solvers/tests/test_inequalities.py::test_issue_8235", "sympy/solvers/tests/test_inequalities.py::test_issue_5526", "sympy/solvers/tests/test_inequalities.py::test_solve_univariate_inequality", "sympy/solvers/tests/test_inequalities.py::test_issue_9954", "sympy/solvers/tests/test_inequalities.py::test_slow_general_univariate", "sympy/solvers/tests/test_inequalities.py::test_issue_8545", "sympy/solvers/tests/test_inequalities.py::test_issue_8974", "sympy/solvers/tests/test_inequalities.py::test_issue_10047", "sympy/solvers/tests/test_inequalities.py::test_issue_10268" ]
[]
BSD
358
sympy__sympy-10335
b9802dd116bfd84c5a5f58e0b8ee90fb72ae21ae
2015-12-30 07:59:00
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/geometry/entity.py b/sympy/geometry/entity.py index af3ed9fad8..5f9d07ea95 100644 --- a/sympy/geometry/entity.py +++ b/sympy/geometry/entity.py @@ -258,7 +258,7 @@ def encloses(self, o): elif isinstance(o, Ray) or isinstance(o, Line): return False elif isinstance(o, Ellipse): - return self.encloses_point(o.center) and not self.intersection(o) + return self.encloses_point(o.center) and not self.intersection(o) and self.encloses_point(Point(o.center.x+o.hradius,o.center.y)) elif isinstance(o, Polygon): if isinstance(o, RegularPolygon): if not self.encloses_point(o.center):
geometry's encloses method fails for non-polygons The perimeter of objects that are not polygons are not properly accounted for with the encloses method so a circle whose center is in a triangle but is larger than the triangle is reported as being enclosed by the triangle: https://groups.google.com/forum/#!topic/sympy/vhSScImk3dc ``` >>> var('a') a >>> t=Triangle(sss=(1,3,3)) >>> c=t.circumcenter >>> t.intersection(Circle(c,3)) # no intersection between the two [] >>> t.encloses(Circle(c,3)) True ```
sympy/sympy
diff --git a/sympy/geometry/tests/test_polygon.py b/sympy/geometry/tests/test_polygon.py index 37604e5392..cef5a5cf05 100644 --- a/sympy/geometry/tests/test_polygon.py +++ b/sympy/geometry/tests/test_polygon.py @@ -3,7 +3,7 @@ from sympy import Abs, Rational, Float, S, Symbol, cos, pi, sqrt, oo from sympy.functions.elementary.trigonometric import tan -from sympy.geometry import (Circle, GeometryError, Point, Polygon, Ray, RegularPolygon, Segment, Triangle, are_similar, +from sympy.geometry import (Circle, Ellipse, GeometryError, Point, Polygon, Ray, RegularPolygon, Segment, Triangle, are_similar, convex_hull, intersection, Line) from sympy.utilities.pytest import raises from sympy.utilities.randtest import verify_numerically @@ -78,6 +78,8 @@ def test_polygon(): assert p5.encloses_point(Point(1, 3)) assert p5.encloses_point(Point(0, 0)) is False assert p5.encloses_point(Point(4, 0)) is False + assert p1.encloses(Circle(Point(2.5,2.5),5)) is False + assert p1.encloses(Ellipse(Point(2.5,2),5,6)) is False p5.plot_interval('x') == [x, 0, 1] assert p5.distance( Polygon(Point(10, 10), Point(14, 14), Point(10, 14))) == 6 * sqrt(2)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 coverage==6.2 execnet==1.9.0 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-asyncio==0.16.0 pytest-cov==4.0.0 pytest-mock==3.6.1 pytest-xdist==3.0.2 -e git+https://github.com/sympy/sympy.git@b9802dd116bfd84c5a5f58e0b8ee90fb72ae21ae#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - execnet==1.9.0 - mpmath==1.3.0 - pytest-asyncio==0.16.0 - pytest-cov==4.0.0 - pytest-mock==3.6.1 - pytest-xdist==3.0.2 - tomli==1.2.3 prefix: /opt/conda/envs/sympy
[ "sympy/geometry/tests/test_polygon.py::test_polygon" ]
[]
[ "sympy/geometry/tests/test_polygon.py::test_convex_hull", "sympy/geometry/tests/test_polygon.py::test_encloses", "sympy/geometry/tests/test_polygon.py::test_triangle_kwargs", "sympy/geometry/tests/test_polygon.py::test_transform", "sympy/geometry/tests/test_polygon.py::test_reflect" ]
[]
BSD
359
sympy__sympy-10340
1403bcb511def74d90178ff0f1d83f4412bc1096
2015-12-31 06:07:39
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index ac7a486dba..f033f647fb 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -206,7 +206,7 @@ def _complement(self, other): return Union(o - self for o in other.args) elif isinstance(other, Complement): - return Complement(other.args[0], Union(other.args[1], self)) + return Complement(other.args[0], Union(other.args[1], self), evaluate=False) elif isinstance(other, EmptySet): return S.EmptySet
sets.Complement fails on certain Unions `sympy.sets.Complement` fails on certain unions. That is, it seems to get stuck in a loop. To recreate from sympy import S from sympy.sets import * a = Interval(0,1) + Interval(2,3) # this works Complement( S.Reals, a ) # this gets stuck in a loop Complement( S.UniversalSet, a) The expected behaviour is for it to output `UniversalSet() \ ([0,1] U [2,3])`
sympy/sympy
diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py index 1afe34c7bb..ec7538cdd0 100644 --- a/sympy/sets/tests/test_sets.py +++ b/sympy/sets/tests/test_sets.py @@ -161,7 +161,6 @@ def test_Complement(): assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \ Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi)) - def test_complement(): assert Interval(0, 1).complement(S.Reals) == \ Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True)) @@ -941,3 +940,11 @@ def test_issue_10113(): def test_issue_10248(): assert list(Intersection(S.Reals, FiniteSet(x))) == [ And(x < oo, x > -oo)] + + +def test_issue_9447(): + a = Interval(0, 1) + Interval(2, 3) + assert Complement(S.UniversalSet, a) == Complement( + S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) + assert Complement(S.Naturals, a) == Complement( + S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 execnet==2.0.2 flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 -e git+https://github.com/sympy/sympy.git@1403bcb511def74d90178ff0f1d83f4412bc1096#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - execnet==2.0.2 - mpmath==1.3.0 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 prefix: /opt/conda/envs/sympy
[ "sympy/sets/tests/test_sets.py::test_issue_9447" ]
[]
[ "sympy/sets/tests/test_sets.py::test_interval_arguments", "sympy/sets/tests/test_sets.py::test_interval_symbolic_end_points", "sympy/sets/tests/test_sets.py::test_union", "sympy/sets/tests/test_sets.py::test_difference", "sympy/sets/tests/test_sets.py::test_Complement", "sympy/sets/tests/test_sets.py::test_complement", "sympy/sets/tests/test_sets.py::test_intersect", "sympy/sets/tests/test_sets.py::test_intersection", "sympy/sets/tests/test_sets.py::test_issue_9623", "sympy/sets/tests/test_sets.py::test_is_disjoint", "sympy/sets/tests/test_sets.py::test_ProductSet_of_single_arg_is_arg", "sympy/sets/tests/test_sets.py::test_interval_subs", "sympy/sets/tests/test_sets.py::test_interval_to_mpi", "sympy/sets/tests/test_sets.py::test_measure", "sympy/sets/tests/test_sets.py::test_is_subset", "sympy/sets/tests/test_sets.py::test_is_proper_subset", "sympy/sets/tests/test_sets.py::test_is_superset", "sympy/sets/tests/test_sets.py::test_is_proper_superset", "sympy/sets/tests/test_sets.py::test_contains", "sympy/sets/tests/test_sets.py::test_interval_symbolic", "sympy/sets/tests/test_sets.py::test_union_contains", "sympy/sets/tests/test_sets.py::test_is_number", "sympy/sets/tests/test_sets.py::test_Interval_is_left_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_is_right_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_as_relational", "sympy/sets/tests/test_sets.py::test_Finite_as_relational", "sympy/sets/tests/test_sets.py::test_Union_as_relational", "sympy/sets/tests/test_sets.py::test_Intersection_as_relational", "sympy/sets/tests/test_sets.py::test_EmptySet", "sympy/sets/tests/test_sets.py::test_finite_basic", "sympy/sets/tests/test_sets.py::test_powerset", "sympy/sets/tests/test_sets.py::test_product_basic", "sympy/sets/tests/test_sets.py::test_real", "sympy/sets/tests/test_sets.py::test_supinf", "sympy/sets/tests/test_sets.py::test_universalset", "sympy/sets/tests/test_sets.py::test_Union_of_ProductSets_shares", 
"sympy/sets/tests/test_sets.py::test_Interval_free_symbols", "sympy/sets/tests/test_sets.py::test_image_interval", "sympy/sets/tests/test_sets.py::test_image_piecewise", "sympy/sets/tests/test_sets.py::test_image_FiniteSet", "sympy/sets/tests/test_sets.py::test_image_Union", "sympy/sets/tests/test_sets.py::test_image_EmptySet", "sympy/sets/tests/test_sets.py::test_issue_5724_7680", "sympy/sets/tests/test_sets.py::test_boundary", "sympy/sets/tests/test_sets.py::test_boundary_Union", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet_line", "sympy/sets/tests/test_sets.py::test_is_open", "sympy/sets/tests/test_sets.py::test_is_closed", "sympy/sets/tests/test_sets.py::test_closure", "sympy/sets/tests/test_sets.py::test_interior", "sympy/sets/tests/test_sets.py::test_issue_7841", "sympy/sets/tests/test_sets.py::test_Eq", "sympy/sets/tests/test_sets.py::test_SymmetricDifference", "sympy/sets/tests/test_sets.py::test_issue_9536", "sympy/sets/tests/test_sets.py::test_issue_9637", "sympy/sets/tests/test_sets.py::test_issue_9808", "sympy/sets/tests/test_sets.py::test_issue_9956", "sympy/sets/tests/test_sets.py::test_issue_Symbol_inter", "sympy/sets/tests/test_sets.py::test_issue_10113", "sympy/sets/tests/test_sets.py::test_issue_10248" ]
[]
BSD
360
sympy__sympy-10343
7e1dda39709367e36e605444a918b31aaf709d67
2015-12-31 16:20:44
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index ac7a486dba..aecf54fe36 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -964,9 +964,10 @@ def _boundary(self): return FiniteSet(self.start, self.end) def _contains(self, other): - if other.is_real is False: + if other.is_real is False or other is S.NegativeInfinity or other is S.Infinity: return false + if self.start is S.NegativeInfinity and self.end is S.Infinity: if not other.is_real is None: return other.is_real
Interval(-oo, oo) contains oo Related : #9706 ```python In [ ]: foo = Interval(-oo, oo) In [ ]: foo.contains(-oo) Out[ ]: True In [ ]: foo.contains(oo) Out[ ]: True ``` Ping @asmeurer
sympy/sympy
diff --git a/sympy/interactive/tests/test_ipythonprinting.py b/sympy/interactive/tests/test_ipythonprinting.py index 211e6cd417..9cb6e1f9db 100644 --- a/sympy/interactive/tests/test_ipythonprinting.py +++ b/sympy/interactive/tests/test_ipythonprinting.py @@ -64,8 +64,7 @@ def test_print_builtin_option(): else: text = app.user_ns['a'][0]['text/plain'] raises(KeyError, lambda: app.user_ns['a'][0]['text/latex']) - # Note : Unicode of Python2 is equivalent to str in Python3. In Python 3 we have one - # text type: str which holds Unicode data and two byte types bytes and bytearray. + # Note : In Python 3 the text is unicode, but in 2 it is a string. # XXX: How can we make this ignore the terminal width? This test fails if # the terminal is too narrow. assert text in ("{pi: 3.14, n_i: 3}", @@ -101,8 +100,7 @@ def test_print_builtin_option(): else: text = app.user_ns['a'][0]['text/plain'] raises(KeyError, lambda: app.user_ns['a'][0]['text/latex']) - # Note : Unicode of Python2 is equivalent to str in Python3. In Python 3 we have one - # text type: str which holds Unicode data and two byte types bytes and bytearray. + # Note : In Python 3 the text is unicode, but in 2 it is a string. 
# Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}' # Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}' # Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}' diff --git a/sympy/sets/tests/test_contains.py b/sympy/sets/tests/test_contains.py index c48e04f052..ab1c06c26c 100644 --- a/sympy/sets/tests/test_contains.py +++ b/sympy/sets/tests/test_contains.py @@ -1,4 +1,4 @@ -from sympy import Symbol, Contains, S, Interval, FiniteSet +from sympy import Symbol, Contains, S, Interval, FiniteSet, oo def test_contains_basic(): @@ -15,3 +15,8 @@ def test_issue_6194(): assert Contains(x, FiniteSet(0)) != S.false assert Contains(x, Interval(1, 1)) != S.false assert Contains(x, S.Integers) != S.false + + +def test_issue_10326(): + assert Contains(oo, Interval(-oo, oo)) == False + assert Contains(-oo, Interval(-oo, oo)) == False
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 1 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.4", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 -e git+https://github.com/sympy/sympy.git@7e1dda39709367e36e605444a918b31aaf709d67#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/sets/tests/test_contains.py::test_issue_10326" ]
[]
[ "sympy/sets/tests/test_contains.py::test_contains_basic", "sympy/sets/tests/test_contains.py::test_issue_6194" ]
[]
BSD
361
sympy__sympy-10346
dc736e61e76a58d87ddc1fcc994e28192cccaee7
2016-01-01 19:15:44
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
smichr: > precision limit When I run your loops (after defining copysign) I find that the first failure occurs for taking the sqrt of a negative number. Although ceiling of an imaginary number works, comparison of that imaginary result via relational will not work: ``` >>> x=-1020 >>> x,rf,rc=(x, floor(sqrt(x)), ceiling(sqrt(x))) >>> assert (rf == rc) == ((rf**2) == x) >>> assert (rc-1)**2 < x and rc**2 >= x Traceback (most recent call last): File "<stdin>", line 1, in <module> File "sympy\core\relational.py", line 202, in __nonzero__ raise TypeError("cannot determine truth value of Relational") TypeError: cannot determine truth value of Relational >>> (rc-1)**2 < x (-1 + 32*I)**2 < -1020 >>> expand(_) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "sympy\core\function.py", line 2018, in expand return sympify(e).expand(deep=deep, modulus=modulus, **hints) File "sympy\core\cache.py", line 95, in wrapper retval = func(*args, **kwargs) File "sympy\core\expr.py", line 2942, in expand expr, hit = Expr._expand_hint(expr, hint, deep=deep, **hints) File "sympy\core\expr.py", line 2880, in _expand_hint expr = expr.func(*sargs) File "sympy\core\relational.py", line 407, in __new__ r = cls._eval_relation(lhs, rhs) File "sympy\core\relational.py", line 742, in _eval_relation return _sympify(lhs.__lt__(rhs)) File "sympy\core\expr.py", line 299, in __lt__ raise TypeError("Invalid comparison of complex %s" % me) TypeError: Invalid comparison of complex -1023 - 64*I ``` smichr: Regarding the apparent failure for `ceiling(sqrt(2**3000 + 1)) == 2**1500 + 1`: ```python >>> type(ceiling(sqrt(2**3000 + 1))) ceiling >>> ceiling(sqrt(2**3000 + 1)) - (2**1500+1) Traceback (most recent call last): ... sympy.core.evalf.PrecisionExhausted ``` The value remains unevaluated so that's why the equality fails. And if you try to calculate the difference between the two, you get an error because the allowed precision was exhausted in trying to find the difference.
diff --git a/.mailmap b/.mailmap index 7f8494ae58..97e8aa9f66 100644 --- a/.mailmap +++ b/.mailmap @@ -221,9 +221,3 @@ Sampad Kumar Saha <[email protected]> sampadsaha5 <[email protected]> Jiaxing Liang <[email protected]> unknown <[email protected]> Jiaxing Liang <[email protected]> liangjiaxing <[email protected]> Jens Jørgen Mortensen <[email protected]> jjmortensen <[email protected]> -Björn Dahlgren <[email protected]> Björn Dahlhren <[email protected]> -Shekhar Prasad Rajak <[email protected]> Shekhar Prasad Rajak <[email protected]> -Arafat Dad Khan <[email protected]> Arafat <[email protected]> -Aqnouch Mohammed <[email protected]> AQNOUCH Mohammed <[email protected]> -Meghana Madhyastha <[email protected]> Meghana <[email protected]> -Tanu Hari Dixit <[email protected]> tokencolour <[email protected]> diff --git a/AUTHORS b/AUTHORS index 13adde6e78..d9f1362c5b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -458,9 +458,3 @@ Jens Jørgen Mortensen <[email protected]> Sampad Kumar Saha <[email protected]> Eva Charlotte Mayer <[email protected]> Laura Domine <[email protected]> -Justin Blythe <[email protected]> -Meghana Madhyastha <[email protected]> -Tanu Hari Dixit <[email protected]> -Shekhar Prasad Rajak <[email protected]> -Aqnouch Mohammed <[email protected]> -Arafat Dad Khan <[email protected]> diff --git a/LICENSE b/LICENSE index df5f217d42..01be471668 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2006-2016 SymPy Development Team +Copyright (c) 2006-2015 SymPy Development Team All rights reserved. diff --git a/doc/src/aboutus.rst b/doc/src/aboutus.rst index 972c7d42b3..281892d5fc 100644 --- a/doc/src/aboutus.rst +++ b/doc/src/aboutus.rst @@ -431,7 +431,6 @@ want to be mentioned here, so see our repository history for a full list). #. Matthew Davis: Fixed documentation typos #. Jack Kemp: Fix incorrect assignment of free variables in linsolve #. Kshitij Saraogi: Parenthesized printing of Intersections -#. 
Arafat Dad Khan: improve detection of polygon/ellipse intersection Up-to-date list in the order of the first contribution is given in the `AUTHORS <https://github.com/sympy/sympy/blob/master/AUTHORS>`_ file. diff --git a/sympy/assumptions/satask.py b/sympy/assumptions/satask.py index 114e98bfa4..64f0136ca5 100644 --- a/sympy/assumptions/satask.py +++ b/sympy/assumptions/satask.py @@ -36,8 +36,7 @@ def satask(proposition, assumptions=True, context=global_assumptions, def get_relevant_facts(proposition, assumptions=(True,), - context=global_assumptions, use_known_facts=True, exprs=None, - relevant_facts=None): + context=global_assumptions, use_known_facts=True, exprs=None, relevant_facts=None): newexprs = set() if not exprs: @@ -76,10 +75,8 @@ def get_all_relevant_facts(proposition, assumptions=True, relevant_facts = set() exprs = None while exprs != set(): - (relevant_facts, exprs) = get_relevant_facts(proposition, - And.make_args(assumptions), context, - use_known_facts=use_known_facts, exprs=exprs, - relevant_facts=relevant_facts) + (relevant_facts, exprs) = get_relevant_facts(proposition, And.make_args(assumptions), + context, use_known_facts=use_known_facts, exprs=exprs, relevant_facts=relevant_facts) i += 1 if i >= iterations: return And(*relevant_facts) diff --git a/sympy/core/evalf.py b/sympy/core/evalf.py index 2fc8d987b1..c568ae1277 100644 --- a/sympy/core/evalf.py +++ b/sympy/core/evalf.py @@ -291,7 +291,7 @@ def get_integer_part(expr, no, options, return_ints=False): Note: this function either gives the exact result or signals failure. """ - import sympy + from sympy.functions.elementary.complexes import re, im # The expression is likely less than 2^30 or so assumed_size = 30 ire, iim, ire_acc, iim_acc = evalf(expr, assumed_size, options) @@ -318,10 +318,31 @@ def get_integer_part(expr, no, options, return_ints=False): # must also calculate whether the difference to the nearest integer is # positive or negative (which may fail if very close). 
def calc_part(expr, nexpr): - from sympy import Add + from sympy.core.add import Add nint = int(to_int(nexpr, rnd)) n, c, p, b = nexpr - if (c != 1 and p != 0) or p < 0: + is_int = (p == 0) + if not is_int: + # if there are subs and they all contain integer re/im parts + # then we can (hopefully) safely substitute them into the + # expression + s = options.get('subs', False) + if s: + doit = True + from sympy.core.compatibility import as_int + for v in s.values(): + try: + as_int(v) + except ValueError: + try: + [as_int(i) for i in v.as_real_imag()] + continue + except (ValueError, AttributeError): + doit = False + break + if doit: + expr = expr.subs(s) + expr = Add(expr, -nint, evaluate=False) x, _, x_acc, _ = evalf(expr, 10, options) try: @@ -334,16 +355,16 @@ def calc_part(expr, nexpr): nint = from_int(nint) return nint, fastlog(nint) + 10 - re, im, re_acc, im_acc = None, None, None, None + re_, im_, re_acc, im_acc = None, None, None, None if ire: - re, re_acc = calc_part(sympy.re(expr, evaluate=False), ire) + re_, re_acc = calc_part(re(expr, evaluate=False), ire) if iim: - im, im_acc = calc_part(sympy.im(expr, evaluate=False), iim) + im_, im_acc = calc_part(im(expr, evaluate=False), iim) if return_ints: - return int(to_int(re or fzero)), int(to_int(im or fzero)) - return re, im, re_acc, im_acc + return int(to_int(re_ or fzero)), int(to_int(im_ or fzero)) + return re_, im_, re_acc, im_acc def evalf_ceiling(expr, prec, options): diff --git a/sympy/crypto/crypto.py b/sympy/crypto/crypto.py index ee55da732e..41dbdbbb98 100644 --- a/sympy/crypto/crypto.py +++ b/sympy/crypto/crypto.py @@ -1557,7 +1557,7 @@ def encipher_elgamal(m, puk): """ if m > puk[0]: - raise ValueError('Message {} should be less than prime {}'.format(m, puk[0])) + ValueError('Message {} should be less than prime {}'.format(m, puk[0])) r = randrange(2, puk[0]) return pow(puk[1], r, puk[0]), m * pow(puk[2], r, puk[0]) % puk[0] @@ -1708,6 +1708,6 @@ def dh_shared_key(puk, b): """ p, _, x = puk if 1 
>= b or b >= p: - raise ValueError('Value of b should be greater 1 and less than prime {}'\ + ValueError('Value of b should be greater 1 and less than prime {}'\ .format(p)) return pow(x, b, p) diff --git a/sympy/geometry/entity.py b/sympy/geometry/entity.py index 5f9d07ea95..af3ed9fad8 100644 --- a/sympy/geometry/entity.py +++ b/sympy/geometry/entity.py @@ -258,7 +258,7 @@ def encloses(self, o): elif isinstance(o, Ray) or isinstance(o, Line): return False elif isinstance(o, Ellipse): - return self.encloses_point(o.center) and not self.intersection(o) and self.encloses_point(Point(o.center.x+o.hradius,o.center.y)) + return self.encloses_point(o.center) and not self.intersection(o) elif isinstance(o, Polygon): if isinstance(o, RegularPolygon): if not self.encloses_point(o.center): diff --git a/sympy/integrals/risch.py b/sympy/integrals/risch.py index ed75b61794..9b8fea56e4 100644 --- a/sympy/integrals/risch.py +++ b/sympy/integrals/risch.py @@ -25,7 +25,7 @@ """ from __future__ import print_function, division -from sympy import real_roots, default_sort_key +from sympy import real_roots from sympy.abc import z from sympy.core.function import Lambda from sympy.core.numbers import ilcm, oo @@ -160,7 +160,7 @@ class DifferentialExtension(object): # to have a safeguard when debugging. 
__slots__ = ('f', 'x', 'T', 'D', 'fa', 'fd', 'Tfuncs', 'backsubs', 'E_K', 'E_args', 'L_K', 'L_args', 'cases', 'case', 't', 'd', 'newf', 'level', - 'ts',) + 'ts') def __init__(self, f=None, x=None, handle_first='log', dummy=True, extension=None, rewrite_complex=False): """ @@ -247,6 +247,7 @@ def update(seq, atoms, func): symlogs = set() while True: + restart = False if self.newf.is_rational_function(*self.T): break @@ -326,12 +327,6 @@ def update(seq, atoms, func): # ANSWER: Yes, otherwise we can't integrate x**x (or # rather prove that it has no elementary integral) # without first manually rewriting it as exp(x*log(x)) - self.newf = self.newf.xreplace({old: new}) - self.backsubs += [(new, old)] - log_new_extension = self._log_part([log(i.base)], - dummy=dummy) - exps = update(exps, self.newf.atoms(exp), lambda i: - i.exp.is_rational_function(*self.T) and i.exp.has(*self.T)) continue ans, u, const = A newterm = exp(i.exp*(log(const) + u)) @@ -382,7 +377,7 @@ def update(seq, atoms, func): self.backsubs.append((new, i)) # remove any duplicates - logs = sorted(set(logs), key=default_sort_key) + logs = list(set(logs)) if handle_first == 'exp' or not log_new_extension: exp_new_extension = self._exp_part(exps, dummy=dummy) @@ -745,7 +740,7 @@ def as_poly_1t(p, t, z): Examples ======== - >>> from sympy import random_poly + >>> from sympy import Symbol, random_poly >>> from sympy.integrals.risch import as_poly_1t >>> from sympy.abc import x, z @@ -1652,11 +1647,11 @@ def risch_integrate(f, x, extension=None, handle_first='log', >>> pprint(risch_integrate(x*x**x*log(x) + x**x + x*x**x, x)) x x*x - >>> pprint(risch_integrate(x**x, x)) + >>> pprint(risch_integrate(x**x*log(x), x)) / | | x - | x dx + | x *log(x) dx | / diff --git a/sympy/physics/vector/frame.py b/sympy/physics/vector/frame.py index 6d6d1b2c55..5f693bc166 100644 --- a/sympy/physics/vector/frame.py +++ b/sympy/physics/vector/frame.py @@ -1,5 +1,5 @@ from sympy import (diff, trigsimp, expand, sin, cos, solve, 
Symbol, sympify, - eye, symbols, Dummy, ImmutableMatrix as Matrix) + eye, ImmutableMatrix as Matrix) from sympy.core.compatibility import string_types, u, range from sympy.physics.vector.vector import Vector, _check_vector @@ -586,7 +586,7 @@ def _rot(axis, angle): from sympy.polys.polyerrors import CoercionFailed from sympy.physics.vector.functions import kinematic_equations q1, q2, q3 = amounts - u1, u2, u3 = symbols('u1, u2, u3', cls=Dummy) + u1, u2, u3 = dynamicsymbols('u1, u2, u3') templist = kinematic_equations([u1, u2, u3], [q1, q2, q3], rot_type, rot_order) templist = [expand(i) for i in templist] diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py index d57020821e..ac7a486dba 100644 --- a/sympy/sets/sets.py +++ b/sympy/sets/sets.py @@ -206,7 +206,7 @@ def _complement(self, other): return Union(o - self for o in other.args) elif isinstance(other, Complement): - return Complement(other.args[0], Union(other.args[1], self), evaluate=False) + return Complement(other.args[0], Union(other.args[1], self)) elif isinstance(other, EmptySet): return S.EmptySet @@ -964,10 +964,9 @@ def _boundary(self): return FiniteSet(self.start, self.end) def _contains(self, other): - if other.is_real is False or other is S.NegativeInfinity or other is S.Infinity: + if other.is_real is False: return false - if self.start is S.NegativeInfinity and self.end is S.Infinity: if not other.is_real is None: return other.is_real diff --git a/sympy/solvers/inequalities.py b/sympy/solvers/inequalities.py index c73052a1a7..6baa44e38c 100644 --- a/sympy/solvers/inequalities.py +++ b/sympy/solvers/inequalities.py @@ -215,7 +215,7 @@ def reduce_rational_inequalities(exprs, gen, relational=True): """ exact = True eqs = [] - solution = S.Reals if exprs else S.EmptySet + solution = S.EmptySet for _exprs in exprs: _eqs = [] @@ -251,15 +251,13 @@ def reduce_rational_inequalities(exprs, gen, relational=True): if not (domain.is_ZZ or domain.is_QQ): expr = numer/denom expr = Relational(expr, 0, rel) - 
solution &= solve_univariate_inequality(expr, gen, relational=False) + solution = Union(solution, solve_univariate_inequality(expr, gen, relational=False)) else: _eqs.append(((numer, denom), rel)) - if _eqs: - eqs.append(_eqs) + eqs.append(_eqs) - if eqs: - solution &= solve_rational_inequalities(eqs) + solution = Union(solution, solve_rational_inequalities(eqs)) if not exact: solution = solution.evalf() diff --git a/sympy/tensor/array/ndim_array.py b/sympy/tensor/array/ndim_array.py index 5846504a34..3700f076c7 100644 --- a/sympy/tensor/array/ndim_array.py +++ b/sympy/tensor/array/ndim_array.py @@ -112,10 +112,6 @@ def _handle_ndarray_creation_inputs(cls, iterable=None, shape=None, **kwargs): if shape is None and iterable is None: shape = () iterable = () - # Construction from another `NDimArray`: - elif shape is None and isinstance(iterable, NDimArray): - shape = iterable.shape - iterable = list(iterable) # Construct N-dim array from an iterable (numpy arrays included): elif shape is None and isinstance(iterable, collections.Iterable): iterable, shape = cls._scan_iterable_shape(iterable) @@ -190,37 +186,6 @@ def rank(self): """ return self._rank - def diff(self, *args): - """ - Calculate the derivative of each element in the array. - - Examples - ======== - - >>> from sympy.tensor.array import ImmutableDenseNDimArray - >>> from sympy.abc import x, y - >>> M = ImmutableDenseNDimArray([[x, y], [1, x*y]]) - >>> M.diff(x) - [[1, 0], [0, y]] - - """ - return type(self)(map(lambda x: x.diff(*args), self), self.shape) - - def applyfunc(self, f): - """Apply a function to each element of the N-dim array. 
- - Examples - ======== - - >>> from sympy.tensor.array import ImmutableDenseNDimArray - >>> m = ImmutableDenseNDimArray([i*2+j for i in range(2) for j in range(2)], (2, 2)) - >>> m - [[0, 1], [2, 3]] - >>> m.applyfunc(lambda i: 2*i) - [[0, 2], [4, 6]] - """ - return type(self)(map(f, self), self.shape) - def __str__(self): """Returns string, allows to use standard functions print() and str(). diff --git a/sympy/tensor/array/sparse_ndim_array.py b/sympy/tensor/array/sparse_ndim_array.py index 66979cf01e..e1bb779dc3 100644 --- a/sympy/tensor/array/sparse_ndim_array.py +++ b/sympy/tensor/array/sparse_ndim_array.py @@ -1,8 +1,6 @@ from __future__ import print_function, division import functools -import itertools - from sympy.core.sympify import _sympify from sympy import S, Dict, flatten, SparseMatrix, Basic, Tuple @@ -36,30 +34,12 @@ def __getitem__(self, index): 2 """ - # `index` is a tuple with one or more slices: - if isinstance(index, tuple) and any([isinstance(i, slice) for i in index]): - - def slice_expand(s, dim): - if not isinstance(s, slice): - return (s,) - start, stop, step = s.indices(dim) - return [start + i*step for i in range((stop-start)//step)] - - sl_factors = [slice_expand(i, dim) for (i, dim) in zip(index, self.shape)] - eindices = itertools.product(*sl_factors) - array = [self._sparse_array.get(self._parse_index(i), S.Zero) for i in eindices] - nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)] - return type(self)(array, nshape) + index = self._parse_index(index) + + if index in self._sparse_array: + return self._sparse_array[index] else: - # `index` is a single slice: - if isinstance(index, slice): - start, stop, step = index.indices(self._loop_size) - retvec = [self._sparse_array.get(ind, S.Zero) for ind in range(start, stop, step)] - return retvec - # `index` is a number or a tuple without any slice: - else: - index = self._parse_index(index) - return self._sparse_array.get(index, S.Zero) + return S.Zero 
@classmethod def zeros(cls, *shape):
bad ceiling(sqrt(big integer)) ```python import sympy ll = 1206577996382235787095214 # lower limit for x**2 x = sympy.ceiling(sympy.sqrt(ll)) assert (x-1)**2 < ll and x**2 >= ll ``` fails in a python3 notebook on jupyter.org.
sympy/sympy
diff --git a/sympy/core/tests/test_evalf.py b/sympy/core/tests/test_evalf.py index adf7ec2fa4..dbee33fe96 100644 --- a/sympy/core/tests/test_evalf.py +++ b/sympy/core/tests/test_evalf.py @@ -243,6 +243,9 @@ def test_evalf_integer_parts(): assert ceiling(x).evalf(subs={x: 3}) == 3 assert ceiling(x).evalf(subs={x: 3*I}) == 3*I assert ceiling(x).evalf(subs={x: 2 + 3*I}) == 2 + 3*I + assert ceiling(x).evalf(subs={x: 3.}) == 3 + assert ceiling(x).evalf(subs={x: 3.*I}) == 3*I + assert ceiling(x).evalf(subs={x: 2. + 3*I}) == 2 + 3*I def test_evalf_trig_zero_detection(): @@ -473,3 +476,7 @@ def test_issue_9326(): d2 = Dummy('d') e = d1 + d2 assert e.evalf(subs = {d1: 1, d2: 2}) == 3 + + +def test_issue_10323(): + assert ceiling(sqrt(2**30 + 1)) == 2**15 + 1 diff --git a/sympy/crypto/tests/test_crypto.py b/sympy/crypto/tests/test_crypto.py index e6af68682f..688ccc09fe 100644 --- a/sympy/crypto/tests/test_crypto.py +++ b/sympy/crypto/tests/test_crypto.py @@ -255,7 +255,6 @@ def test_elgamal(): ek = elgamal_public_key(dk) m = 12345 assert m == decipher_elgamal(encipher_elgamal(m, ek), dk) - raises(ValueError, lambda: encipher_elgamal(2000, (1031, 14, 212))) def test_dh_private_key(): p, g, _ = dh_private_key(digit = 100) @@ -276,4 +275,3 @@ def test_dh_shared_key(): b = randrange(2, p) sk = dh_shared_key((p, _, ga), b) assert sk == pow(ga, b, p) - raises(ValueError, lambda: dh_shared_key((1031, 14, 565), 2000)) diff --git a/sympy/geometry/tests/test_polygon.py b/sympy/geometry/tests/test_polygon.py index cef5a5cf05..37604e5392 100644 --- a/sympy/geometry/tests/test_polygon.py +++ b/sympy/geometry/tests/test_polygon.py @@ -3,7 +3,7 @@ from sympy import Abs, Rational, Float, S, Symbol, cos, pi, sqrt, oo from sympy.functions.elementary.trigonometric import tan -from sympy.geometry import (Circle, Ellipse, GeometryError, Point, Polygon, Ray, RegularPolygon, Segment, Triangle, are_similar, +from sympy.geometry import (Circle, GeometryError, Point, Polygon, Ray, RegularPolygon, 
Segment, Triangle, are_similar, convex_hull, intersection, Line) from sympy.utilities.pytest import raises from sympy.utilities.randtest import verify_numerically @@ -78,8 +78,6 @@ def test_polygon(): assert p5.encloses_point(Point(1, 3)) assert p5.encloses_point(Point(0, 0)) is False assert p5.encloses_point(Point(4, 0)) is False - assert p1.encloses(Circle(Point(2.5,2.5),5)) is False - assert p1.encloses(Ellipse(Point(2.5,2),5,6)) is False p5.plot_interval('x') == [x, 0, 1] assert p5.distance( Polygon(Point(10, 10), Point(14, 14), Point(10, 14))) == 6 * sqrt(2) diff --git a/sympy/integrals/tests/test_risch.py b/sympy/integrals/tests/test_risch.py index a0ad2fe2ae..3258230ec1 100644 --- a/sympy/integrals/tests/test_risch.py +++ b/sympy/integrals/tests/test_risch.py @@ -461,11 +461,9 @@ def test_DifferentialExtension_log(): def test_DifferentialExtension_symlog(): - # See comment on test_risch_integrate below assert DifferentialExtension(log(x**x), x, dummy=False)._important_attrs == \ - (Poly(t0*x, t1), Poly(1, t1), [Poly(1, x), Poly(1/x, t0), Poly((t0 + - 1)*t1, t1)], [x, t0, t1], [Lambda(i, log(i)), Lambda(i, exp(i*t0))], - [(exp(x*log(x)), x**x)], [2], [t0*x], [1], [x]) + (Poly(x*t0, t0), Poly(1, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0], + [Lambda(i, log(i))], [(x*log(x), log(x**x))], [], [], [1], [x]) assert DifferentialExtension(log(x**y), x, dummy=False)._important_attrs == \ (Poly(y*t0, t0), Poly(1, t0), [Poly(1, x), Poly(1/x, t0)], [x, t0], [Lambda(i, log(i))], [(y*log(x), log(x**y))], [], [], [1], [x]) @@ -646,14 +644,7 @@ def test_risch_integrate(): # These are tested here in addition to in test_DifferentialExtension above # (symlogs) to test that backsubs works correctly. The integrals should be # written in terms of the original logarithms in the integrands. - - # XXX: Unfortunately, making backsubs work on this one is a little - # trickier, because x**x is converted to exp(x*log(x)), and so log(x**x) - # is converted to x*log(x). 
(x**2*log(x)).subs(x*log(x), log(x**x)) is - # smart enough, the issue is that these splits happen at different places - # in the algorithm. Maybe a heuristic is in order - assert risch_integrate(log(x**x), x) == x**2*log(x)/2 - x**2/4 - + assert risch_integrate(log(x**x), x) == x*log(x**x)/2 - x**2/4 assert risch_integrate(log(x**y), x) == x*log(x**y) - x*y assert risch_integrate(log(sqrt(x)), x) == x*log(sqrt(x)) - x/2 @@ -667,8 +658,3 @@ def test_NonElementaryIntegral(): assert isinstance(risch_integrate(x**x*log(x), x), NonElementaryIntegral) # Make sure methods of Integral still give back a NonElementaryIntegral assert isinstance(NonElementaryIntegral(x**x*t0, x).subs(t0, log(x)), NonElementaryIntegral) - -def test_xtothex(): - a = risch_integrate(x**x, x) - assert a == NonElementaryIntegral(x**x, x) - assert isinstance(a, NonElementaryIntegral) diff --git a/sympy/interactive/tests/test_ipythonprinting.py b/sympy/interactive/tests/test_ipythonprinting.py index 211e6cd417..9cb6e1f9db 100644 --- a/sympy/interactive/tests/test_ipythonprinting.py +++ b/sympy/interactive/tests/test_ipythonprinting.py @@ -64,8 +64,7 @@ def test_print_builtin_option(): else: text = app.user_ns['a'][0]['text/plain'] raises(KeyError, lambda: app.user_ns['a'][0]['text/latex']) - # Note : Unicode of Python2 is equivalent to str in Python3. In Python 3 we have one - # text type: str which holds Unicode data and two byte types bytes and bytearray. + # Note : In Python 3 the text is unicode, but in 2 it is a string. # XXX: How can we make this ignore the terminal width? This test fails if # the terminal is too narrow. assert text in ("{pi: 3.14, n_i: 3}", @@ -101,8 +100,7 @@ def test_print_builtin_option(): else: text = app.user_ns['a'][0]['text/plain'] raises(KeyError, lambda: app.user_ns['a'][0]['text/latex']) - # Note : Unicode of Python2 is equivalent to str in Python3. In Python 3 we have one - # text type: str which holds Unicode data and two byte types bytes and bytearray. 
+ # Note : In Python 3 the text is unicode, but in 2 it is a string. # Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}' # Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}' # Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}' diff --git a/sympy/physics/vector/tests/test_frame.py b/sympy/physics/vector/tests/test_frame.py index 153f477fed..f66a953d6d 100644 --- a/sympy/physics/vector/tests/test_frame.py +++ b/sympy/physics/vector/tests/test_frame.py @@ -164,9 +164,3 @@ class MyReferenceFrame(ReferenceFrame): B = MyReferenceFrame('B') C = B.orientnew('C', 'Axis', [0, B.x]) assert isinstance(C, MyReferenceFrame) - - -def test_issue_10348(): - u = dynamicsymbols('u:3') - I = ReferenceFrame('I') - A = I.orientnew('A', 'space', u, 'XYZ') diff --git a/sympy/sets/tests/test_contains.py b/sympy/sets/tests/test_contains.py index ab1c06c26c..c48e04f052 100644 --- a/sympy/sets/tests/test_contains.py +++ b/sympy/sets/tests/test_contains.py @@ -1,4 +1,4 @@ -from sympy import Symbol, Contains, S, Interval, FiniteSet, oo +from sympy import Symbol, Contains, S, Interval, FiniteSet def test_contains_basic(): @@ -15,8 +15,3 @@ def test_issue_6194(): assert Contains(x, FiniteSet(0)) != S.false assert Contains(x, Interval(1, 1)) != S.false assert Contains(x, S.Integers) != S.false - - -def test_issue_10326(): - assert Contains(oo, Interval(-oo, oo)) == False - assert Contains(-oo, Interval(-oo, oo)) == False diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py index ec7538cdd0..1afe34c7bb 100644 --- a/sympy/sets/tests/test_sets.py +++ b/sympy/sets/tests/test_sets.py @@ -161,6 +161,7 @@ def test_Complement(): assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \ Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi)) + def test_complement(): assert Interval(0, 1).complement(S.Reals) == \ Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True)) @@ -940,11 +941,3 @@ def test_issue_10113(): def test_issue_10248(): assert 
list(Intersection(S.Reals, FiniteSet(x))) == [ And(x < oo, x > -oo)] - - -def test_issue_9447(): - a = Interval(0, 1) + Interval(2, 3) - assert Complement(S.UniversalSet, a) == Complement( - S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) - assert Complement(S.Naturals, a) == Complement( - S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) diff --git a/sympy/solvers/tests/test_inequalities.py b/sympy/solvers/tests/test_inequalities.py index 5ab366e2cd..ef606209a4 100644 --- a/sympy/solvers/tests/test_inequalities.py +++ b/sympy/solvers/tests/test_inequalities.py @@ -168,10 +168,6 @@ def test_reduce_rational_inequalities_real_relational(): relational=False) == \ Union(Interval.Lopen(-oo, -2), Interval.Lopen(0, 4)) - # issue sympy/sympy#10237 - assert reduce_rational_inequalities( - [[x < oo, x >= 0, -oo < x]], x, relational=False) == Interval(0, oo) - def test_reduce_abs_inequalities(): e = abs(x - 5) < 3 diff --git a/sympy/tensor/array/tests/test_immutable_ndim_array.py b/sympy/tensor/array/tests/test_immutable_ndim_array.py index cda2e13afb..8bfbdf3984 100644 --- a/sympy/tensor/array/tests/test_immutable_ndim_array.py +++ b/sympy/tensor/array/tests/test_immutable_ndim_array.py @@ -257,31 +257,3 @@ def test_slices(): assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) assert md[:, :, :] == md - - sd = ImmutableSparseNDimArray(range(10, 34), (2, 3, 4)) - assert sd == ImmutableSparseNDimArray(md) - - assert sd[:] == md._array - assert sd[:] == list(sd) - assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) - assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) - assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) - assert sd[:, :, :] == sd - - -def test_diff_and_applyfunc(): - from sympy.abc import x, y, z - md = ImmutableDenseNDimArray([[x, y], [x*z, x*y*z]]) - assert md.diff(x) == 
ImmutableDenseNDimArray([[1, 0], [z, y*z]]) - - sd = ImmutableSparseNDimArray(md) - assert sd == ImmutableSparseNDimArray([x, y, x*z, x*y*z], (2, 2)) - assert sd.diff(x) == ImmutableSparseNDimArray([[1, 0], [z, y*z]]) - - mdn = md.applyfunc(lambda x: x*3) - assert mdn == ImmutableDenseNDimArray([[3*x, 3*y], [3*x*z, 3*x*y*z]]) - assert md != mdn - - sdn = sd.applyfunc(lambda x: x/2) - assert sdn == ImmutableSparseNDimArray([[x/2, y/2], [x*z/2, x*y*z/2]]) - assert sd != sdn diff --git a/sympy/tensor/array/tests/test_mutable_ndim_array.py b/sympy/tensor/array/tests/test_mutable_ndim_array.py index b49e5aefd3..1920ec26db 100644 --- a/sympy/tensor/array/tests/test_mutable_ndim_array.py +++ b/sympy/tensor/array/tests/test_mutable_ndim_array.py @@ -238,33 +238,3 @@ def test_higher_dimenions(): m3_other = MutableDenseNDimArray([[[10, 11, 12, 13], [14, 15, 16, 17], [18, 19, 20, 21]], [[22, 23, 24, 25], [26, 27, 28, 29], [30, 31, 32, 33]]], (2, 3, 4)) assert m3 == m3_other - - -def test_slices(): - md = MutableDenseNDimArray(range(10, 34), (2, 3, 4)) - - assert md[:] == md._array - assert md[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) - assert md[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) - assert md[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) - assert md[:, :, :] == md - - sd = MutableSparseNDimArray(range(10, 34), (2, 3, 4)) - assert sd == MutableSparseNDimArray(md) - - assert sd[:] == md._array - assert sd[:] == list(sd) - assert sd[:, :, 0].tomatrix() == Matrix([[10, 14, 18], [22, 26, 30]]) - assert sd[0, 1:2, :].tomatrix() == Matrix([[14, 15, 16, 17]]) - assert sd[0, 1:3, :].tomatrix() == Matrix([[14, 15, 16, 17], [18, 19, 20, 21]]) - assert sd[:, :, :] == sd - - -def test_diff(): - from sympy.abc import x, y, z - md = MutableDenseNDimArray([[x, y], [x*z, x*y*z]]) - assert md.diff(x) == MutableDenseNDimArray([[1, 0], [z, y*z]]) - - sd = MutableSparseNDimArray(md) - assert sd == MutableSparseNDimArray([x, y, x*z, x*y*z], 
(2, 2)) - assert sd.diff(x) == MutableSparseNDimArray([[1, 0], [z, y*z]])
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 14 }
0.7
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": [], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 mpmath==1.3.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 -e git+https://github.com/sympy/sympy.git@dc736e61e76a58d87ddc1fcc994e28192cccaee7#egg=sympy tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mpmath==1.3.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/sympy
[ "sympy/core/tests/test_evalf.py::test_issue_10323", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_symlog", "sympy/integrals/tests/test_risch.py::test_risch_integrate" ]
[]
[ "sympy/core/tests/test_evalf.py::test_evalf_helpers", "sympy/core/tests/test_evalf.py::test_evalf_basic", "sympy/core/tests/test_evalf.py::test_cancellation", "sympy/core/tests/test_evalf.py::test_evalf_powers", "sympy/core/tests/test_evalf.py::test_evalf_rump", "sympy/core/tests/test_evalf.py::test_evalf_complex", "sympy/core/tests/test_evalf.py::test_evalf_complex_powers", "sympy/core/tests/test_evalf.py::test_evalf_exponentiation", "sympy/core/tests/test_evalf.py::test_evalf_complex_cancellation", "sympy/core/tests/test_evalf.py::test_evalf_logs", "sympy/core/tests/test_evalf.py::test_evalf_trig", "sympy/core/tests/test_evalf.py::test_evalf_near_integers", "sympy/core/tests/test_evalf.py::test_evalf_ramanujan", "sympy/core/tests/test_evalf.py::test_evalf_bugs", "sympy/core/tests/test_evalf.py::test_evalf_integer_parts", "sympy/core/tests/test_evalf.py::test_evalf_trig_zero_detection", "sympy/core/tests/test_evalf.py::test_evalf_sum", "sympy/core/tests/test_evalf.py::test_evalf_divergent_series", "sympy/core/tests/test_evalf.py::test_evalf_product", "sympy/core/tests/test_evalf.py::test_evalf_py_methods", "sympy/core/tests/test_evalf.py::test_evalf_power_subs_bugs", "sympy/core/tests/test_evalf.py::test_evalf_arguments", "sympy/core/tests/test_evalf.py::test_implemented_function_evalf", "sympy/core/tests/test_evalf.py::test_evaluate_false", "sympy/core/tests/test_evalf.py::test_evalf_relational", "sympy/core/tests/test_evalf.py::test_issue_5486", "sympy/core/tests/test_evalf.py::test_issue_5486_bug", "sympy/core/tests/test_evalf.py::test_bugs", "sympy/core/tests/test_evalf.py::test_subs", "sympy/core/tests/test_evalf.py::test_issue_4956_5204", "sympy/core/tests/test_evalf.py::test_old_docstring", "sympy/core/tests/test_evalf.py::test_issue_4806", "sympy/core/tests/test_evalf.py::test_evalf_mul", "sympy/core/tests/test_evalf.py::test_scaled_zero", "sympy/core/tests/test_evalf.py::test_chop_value", "sympy/core/tests/test_evalf.py::test_infinities", 
"sympy/core/tests/test_evalf.py::test_to_mpmath", "sympy/core/tests/test_evalf.py::test_issue_6632_evalf", "sympy/core/tests/test_evalf.py::test_issue_4945", "sympy/core/tests/test_evalf.py::test_evalf_integral", "sympy/core/tests/test_evalf.py::test_issue_8821_highprec_from_str", "sympy/core/tests/test_evalf.py::test_issue_8853", "sympy/core/tests/test_evalf.py::test_issue_9326", "sympy/crypto/tests/test_crypto.py::test_alphabet_of_cipher", "sympy/crypto/tests/test_crypto.py::test_cycle_list", "sympy/crypto/tests/test_crypto.py::test_encipher_shift", "sympy/crypto/tests/test_crypto.py::test_encipher_affine", "sympy/crypto/tests/test_crypto.py::test_encipher_substitution", "sympy/crypto/tests/test_crypto.py::test_encipher_vigenere", "sympy/crypto/tests/test_crypto.py::test_decipher_vigenere", "sympy/crypto/tests/test_crypto.py::test_encipher_hill", "sympy/crypto/tests/test_crypto.py::test_decipher_hill", "sympy/crypto/tests/test_crypto.py::test_encipher_bifid5", "sympy/crypto/tests/test_crypto.py::test_bifid5_square", "sympy/crypto/tests/test_crypto.py::test_decipher_bifid5", "sympy/crypto/tests/test_crypto.py::test_bifid7_square", "sympy/crypto/tests/test_crypto.py::test_encipher_bifid7", "sympy/crypto/tests/test_crypto.py::test_encipher_bifid6", "sympy/crypto/tests/test_crypto.py::test_decipher_bifid6", "sympy/crypto/tests/test_crypto.py::test_bifid6_square", "sympy/crypto/tests/test_crypto.py::test_rsa_public_key", "sympy/crypto/tests/test_crypto.py::test_rsa_private_key", "sympy/crypto/tests/test_crypto.py::test_encipher_rsa", "sympy/crypto/tests/test_crypto.py::test_decipher_rsa", "sympy/crypto/tests/test_crypto.py::test_kid_rsa_public_key", "sympy/crypto/tests/test_crypto.py::test_kid_rsa_private_key", "sympy/crypto/tests/test_crypto.py::test_encipher_kid_rsa", "sympy/crypto/tests/test_crypto.py::test_decipher_kid_rsa", "sympy/crypto/tests/test_crypto.py::test_encode_morse", "sympy/crypto/tests/test_crypto.py::test_decode_morse", 
"sympy/crypto/tests/test_crypto.py::test_lfsr_sequence", "sympy/crypto/tests/test_crypto.py::test_lfsr_autocorrelation", "sympy/crypto/tests/test_crypto.py::test_lfsr_connection_polynomial", "sympy/crypto/tests/test_crypto.py::test_elgamal_private_key", "sympy/crypto/tests/test_crypto.py::test_elgamal", "sympy/crypto/tests/test_crypto.py::test_dh_private_key", "sympy/crypto/tests/test_crypto.py::test_dh_public_key", "sympy/crypto/tests/test_crypto.py::test_dh_shared_key", "sympy/geometry/tests/test_polygon.py::test_polygon", "sympy/geometry/tests/test_polygon.py::test_convex_hull", "sympy/geometry/tests/test_polygon.py::test_encloses", "sympy/geometry/tests/test_polygon.py::test_triangle_kwargs", "sympy/geometry/tests/test_polygon.py::test_transform", "sympy/geometry/tests/test_polygon.py::test_reflect", "sympy/integrals/tests/test_risch.py::test_gcdex_diophantine", "sympy/integrals/tests/test_risch.py::test_frac_in", "sympy/integrals/tests/test_risch.py::test_as_poly_1t", "sympy/integrals/tests/test_risch.py::test_derivation", "sympy/integrals/tests/test_risch.py::test_splitfactor", "sympy/integrals/tests/test_risch.py::test_canonical_representation", "sympy/integrals/tests/test_risch.py::test_hermite_reduce", "sympy/integrals/tests/test_risch.py::test_polynomial_reduce", "sympy/integrals/tests/test_risch.py::test_laurent_series", "sympy/integrals/tests/test_risch.py::test_recognize_derivative", "sympy/integrals/tests/test_risch.py::test_recognize_log_derivative", "sympy/integrals/tests/test_risch.py::test_residue_reduce", "sympy/integrals/tests/test_risch.py::test_integrate_hyperexponential", "sympy/integrals/tests/test_risch.py::test_integrate_hyperexponential_polynomial", "sympy/integrals/tests/test_risch.py::test_integrate_hyperexponential_returns_piecewise", "sympy/integrals/tests/test_risch.py::test_integrate_primitive", "sympy/integrals/tests/test_risch.py::test_integrate_hypertangent_polynomial", 
"sympy/integrals/tests/test_risch.py::test_integrate_nonlinear_no_specials", "sympy/integrals/tests/test_risch.py::test_integer_powers", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_exp", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_log", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_handle_first", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_all_attrs", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_extension_flag", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_misc", "sympy/integrals/tests/test_risch.py::test_DifferentialExtension_Rothstein", "sympy/integrals/tests/test_risch.py::test_DecrementLevel", "sympy/integrals/tests/test_risch.py::test_risch_integrate_float", "sympy/integrals/tests/test_risch.py::test_NonElementaryIntegral", "sympy/physics/vector/tests/test_frame.py::test_coordinate_vars", "sympy/physics/vector/tests/test_frame.py::test_ang_vel", "sympy/physics/vector/tests/test_frame.py::test_dcm", "sympy/physics/vector/tests/test_frame.py::test_orientnew_respects_parent_class", "sympy/sets/tests/test_contains.py::test_contains_basic", "sympy/sets/tests/test_contains.py::test_issue_6194", "sympy/sets/tests/test_sets.py::test_interval_arguments", "sympy/sets/tests/test_sets.py::test_interval_symbolic_end_points", "sympy/sets/tests/test_sets.py::test_union", "sympy/sets/tests/test_sets.py::test_difference", "sympy/sets/tests/test_sets.py::test_Complement", "sympy/sets/tests/test_sets.py::test_complement", "sympy/sets/tests/test_sets.py::test_intersect", "sympy/sets/tests/test_sets.py::test_intersection", "sympy/sets/tests/test_sets.py::test_issue_9623", "sympy/sets/tests/test_sets.py::test_is_disjoint", "sympy/sets/tests/test_sets.py::test_ProductSet_of_single_arg_is_arg", "sympy/sets/tests/test_sets.py::test_interval_subs", "sympy/sets/tests/test_sets.py::test_interval_to_mpi", "sympy/sets/tests/test_sets.py::test_measure", 
"sympy/sets/tests/test_sets.py::test_is_subset", "sympy/sets/tests/test_sets.py::test_is_proper_subset", "sympy/sets/tests/test_sets.py::test_is_superset", "sympy/sets/tests/test_sets.py::test_is_proper_superset", "sympy/sets/tests/test_sets.py::test_contains", "sympy/sets/tests/test_sets.py::test_interval_symbolic", "sympy/sets/tests/test_sets.py::test_union_contains", "sympy/sets/tests/test_sets.py::test_is_number", "sympy/sets/tests/test_sets.py::test_Interval_is_left_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_is_right_unbounded", "sympy/sets/tests/test_sets.py::test_Interval_as_relational", "sympy/sets/tests/test_sets.py::test_Finite_as_relational", "sympy/sets/tests/test_sets.py::test_Union_as_relational", "sympy/sets/tests/test_sets.py::test_Intersection_as_relational", "sympy/sets/tests/test_sets.py::test_EmptySet", "sympy/sets/tests/test_sets.py::test_finite_basic", "sympy/sets/tests/test_sets.py::test_powerset", "sympy/sets/tests/test_sets.py::test_product_basic", "sympy/sets/tests/test_sets.py::test_real", "sympy/sets/tests/test_sets.py::test_supinf", "sympy/sets/tests/test_sets.py::test_universalset", "sympy/sets/tests/test_sets.py::test_Union_of_ProductSets_shares", "sympy/sets/tests/test_sets.py::test_Interval_free_symbols", "sympy/sets/tests/test_sets.py::test_image_interval", "sympy/sets/tests/test_sets.py::test_image_piecewise", "sympy/sets/tests/test_sets.py::test_image_FiniteSet", "sympy/sets/tests/test_sets.py::test_image_Union", "sympy/sets/tests/test_sets.py::test_image_EmptySet", "sympy/sets/tests/test_sets.py::test_issue_5724_7680", "sympy/sets/tests/test_sets.py::test_boundary", "sympy/sets/tests/test_sets.py::test_boundary_Union", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet", "sympy/sets/tests/test_sets.py::test_boundary_ProductSet_line", "sympy/sets/tests/test_sets.py::test_is_open", "sympy/sets/tests/test_sets.py::test_is_closed", "sympy/sets/tests/test_sets.py::test_closure", 
"sympy/sets/tests/test_sets.py::test_interior", "sympy/sets/tests/test_sets.py::test_issue_7841", "sympy/sets/tests/test_sets.py::test_Eq", "sympy/sets/tests/test_sets.py::test_SymmetricDifference", "sympy/sets/tests/test_sets.py::test_issue_9536", "sympy/sets/tests/test_sets.py::test_issue_9637", "sympy/sets/tests/test_sets.py::test_issue_9808", "sympy/sets/tests/test_sets.py::test_issue_9956", "sympy/sets/tests/test_sets.py::test_issue_Symbol_inter", "sympy/sets/tests/test_sets.py::test_issue_10113", "sympy/sets/tests/test_sets.py::test_issue_10248", "sympy/solvers/tests/test_inequalities.py::test_solve_poly_inequality", "sympy/solvers/tests/test_inequalities.py::test_reduce_poly_inequalities_real_interval", "sympy/solvers/tests/test_inequalities.py::test_reduce_poly_inequalities_complex_relational", "sympy/solvers/tests/test_inequalities.py::test_reduce_rational_inequalities_real_relational", "sympy/solvers/tests/test_inequalities.py::test_reduce_abs_inequalities", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_general", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_boolean", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_multivariate", "sympy/solvers/tests/test_inequalities.py::test_reduce_inequalities_errors", "sympy/solvers/tests/test_inequalities.py::test_hacky_inequalities", "sympy/solvers/tests/test_inequalities.py::test_issue_6343", "sympy/solvers/tests/test_inequalities.py::test_issue_8235", "sympy/solvers/tests/test_inequalities.py::test_issue_5526", "sympy/solvers/tests/test_inequalities.py::test_solve_univariate_inequality", "sympy/solvers/tests/test_inequalities.py::test_issue_9954", "sympy/solvers/tests/test_inequalities.py::test_slow_general_univariate", "sympy/solvers/tests/test_inequalities.py::test_issue_8545", "sympy/solvers/tests/test_inequalities.py::test_issue_8974", "sympy/solvers/tests/test_inequalities.py::test_issue_10047", 
"sympy/solvers/tests/test_inequalities.py::test_issue_10268", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_ndim_array_initiation", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_reshape", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_iterator", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_sparse", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_calculation", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_ndim_array_converting", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_converting_functions", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_equality", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_arithmetic", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_higher_dimenions", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_rebuild_immutable_arrays", "sympy/tensor/array/tests/test_immutable_ndim_array.py::test_slices", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_ndim_array_initiation", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_reshape", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_iterator", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_sparse", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_calculation", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_ndim_array_converting", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_converting_functions", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_equality", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_arithmetic", "sympy/tensor/array/tests/test_mutable_ndim_array.py::test_higher_dimenions" ]
[]
BSD
362
numberoverzero__bottom-17
4293d2726d2a7222faa55ca509871ee03f4e66e1
2016-01-02 04:11:06
4293d2726d2a7222faa55ca509871ee03f4e66e1
diff --git a/.python-version b/.python-version index fef12e2..d2286d0 100644 --- a/.python-version +++ b/.python-version @@ -1,1 +1,2 @@ bottom +3.5.0 diff --git a/.travis.yml b/.travis.yml index f37e3e8..a14cce9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,8 @@ language: python -python: 3.5-dev +python: 3.5 env: - - TOXENV=py35 + - TOX_ENV=py35 install: pip install tox coveralls -script: tox -e $TOXENV +script: tox -e $TOX_ENV after_success: - coveralls diff --git a/MANIFEST.in b/MANIFEST.in index 564aaf0..736edbe 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,2 @@ -include README.markdown +include README.rst recursive-exclude tests * diff --git a/README.markdown b/README.markdown deleted file mode 100644 index 06f249c..0000000 --- a/README.markdown +++ /dev/null @@ -1,808 +0,0 @@ -# bottom 0.9.13 - -[![Build Status] -(https://travis-ci.org/numberoverzero/bottom.svg?branch=master)] -(https://travis-ci.org/numberoverzero/bottom)[![Coverage Status] -(https://coveralls.io/repos/numberoverzero/bottom/badge.png?branch=master)] -(https://coveralls.io/r/numberoverzero/bottom?branch=master) - -Downloads https://pypi.python.org/pypi/bottom - -Source https://github.com/numberoverzero/bottom - -asyncio-based rfc2812-compliant IRC Client - -# Installation - -`pip install bottom` - -# Getting Started - -bottom isn't a kitchen-sink library. Instead, it provides a consistent API with a small surface area, tuned for performance and ease of extension. Similar to the routing style of bottle.py, hooking into events is one line. 
- -```python -import bottom -import asyncio - -NICK = 'bottom-bot' -CHANNEL = '#python' - -bot = bottom.Client('localhost', 6697) - - [email protected]('CLIENT_CONNECT') -def connect(): - bot.send('NICK', nick=NICK) - bot.send('USER', user=NICK, realname='Bot using bottom.py') - bot.send('JOIN', channel=CHANNEL) - - [email protected]('PING') -def keepalive(message): - bot.send('PONG', message=message) - - [email protected]('PRIVMSG') -def message(nick, target, message): - ''' Echo all messages ''' - - # Don't echo ourselves - if nick == NICK: - return - # Direct message to bot - if target == NICK: - bot.send("PRIVMSG", target=nick, message=message) - # Message in channel - else: - bot.send("PRIVMSG", target=target, message=message) - -asyncio.get_event_loop().run_until_complete(bot.run()) -``` - -# Versioning and RFC2812 - -* Bottom follows semver for its **public** API. - - * Currently, `Client` is the only public member of bottom. - * IRC replies/codes which are not yet implemented may be added at any time, and will correspond to a patch - the function contract of `@on` method does not change. - * You should not rely on the internal api staying the same between minor versions. - * Over time, private apis may be raised to become public. The reverse will never occur. - -* There are a number of changes from RFC2812 - none should noticeably change how you interact with a standard IRC server. For specific adjustments, see the notes section of each command in [`Supported Commands`](#supported-commands). - -# Contributing -Contributions welcome! When reporting issues, please provide enough detail to reproduce the bug - sample code is ideal. When submitting a PR, please make sure `tox` passes (including flake8). - -### Development -bottom uses `tox`, `pytest` and `flake8`. 
To get everything set up: - -``` -# RECOMMENDED: create a virtualenv with: -# mkvirtualenv bottom -git clone https://github.com/numberoverzero/bottom.git -pip install tox -tox -``` - -### TODO -* Better `Client` docstrings -* Add missing replies/errors to `unpack.py:unpack_command` - * Add reply/error parameters to `unpack.py:parameters` - * Document [`Supported Events`](#supported-events) - - -### Contributors -* [fahhem](https://github.com/fahhem) -* [thebigmunch](https://github.com/thebigmunch) -* [tilal6991](https://github.com/tilal6991) - -# API - -### Client.run() - -*This is a coroutine.* - -Start the magic. This will connect the client, and then read until it disconnects. The `CLIENT_DISCONNECT` event will fire before the loop exits, allowing you to `yield from Client.connect()` and keep the client running. - -If you want to call this synchronously (block until it's complete) use the following: - -```python -import asyncio -# ... client is defined somewhere - -loop = asyncio.get_event_loop() -task = client.run() -loop.run_until_complete(task) -``` - -### Client.on(event)(func) - -This `@decorator` is the main way you'll interact with a `Client`. It takes a string, returning a function wrapper that validates the function and registers it for the given event. When that event occurs, the function will be called, mapping any arguments the function may expect from the set of available arguments for the event. - -Not all available arguments need to be used. For instance, both of the following are valid: - -```python [email protected]('PRIVMSG') -def event(nick, message, target): - ''' Doesn't use user, host. 
argument order is different ''' - # message sent to bot - echo message - if target == bot.nick: - bot.send('PRIVMSG', target, message=message) - # Some channel we're watching - elif target == bot.monitored_channel: - logger.info("{} -> {}: {}".format(nick, target, message)) - - -@bot.on('PRIVMSG') -def func(message, target): - ''' Just waiting for the signal ''' - if message == codeword and target == secret_channel: - execute_heist() -``` - -VAR_KWARGS can be used, as long as the name doesn't mask an actual parameter. VAR_ARGS may not be used. - -```python -# OK - kwargs, no masking -@bot.on('PRIVMSG') -def event(message, **everything_else): - logger.log(everything_else['nick'] + " said " + message) - - -# NOT OK - kwargs, masking parameter <nick> -@bot.on('PRIVMSG') -def event(message, **nick): - logger.log(nick['target']) - - -# NOT OK - uses VAR_ARGS -@bot.on('PRIVMSG') -def event(message, *args): - logger.log(args) -``` - -Decorated functions will be invoked asynchronously, and may optionally use the `yield from` syntax. Functions do not need to be wrapped with `@asyncio.coroutine` - this is handled as part of the function caching process. - -### Client.trigger(event, **kwargs) - -*This is a coroutine.* - -Manually inject a command or reply as if it came from the server. This is useful for invoking other handlers. 
- -```python -# Manually trigger `PRIVMSG` handlers: -yield from bot.trigger('privmsg', nick="always_says_no", message="yes") -``` - -```python -# Rename !commands to !help [email protected]('privmsg') -def parse(nick, target, message): - if message == '!commands': - bot.send('privmsg', target=nick, - message="!commands was renamed to !help in 1.2") - # Don't make them retype it, just make it happen - yield from bot.trigger('privmsg', nick=nick, - target=target, message="!help") -``` - -```python -# While testing the auto-reconnect module, simulate a disconnect: -def test_reconnect(bot): - loop = asyncio.get_event_loop() - loop.run_until_complete(bot.trigger("client_disconnect")) - assert bot.connected -``` - -### Client.connect() - -*This is a coroutine.* - -Attempt to reconnect using the client's host, port. - -```python [email protected]('client_disconnect') -def reconnect(): - # Wait a few seconds - yield from asyncio.sleep(3) - yield from bot.connect() -``` - -### Client.disconnect() - -*This is a coroutine.* - -Disconnect from the server if connected. - -```python [email protected]('privmsg') -def suicide_pill(nick, message): - if nick == "spy_handler" and message == "last stop": - yield from bot.disconnect() -``` - -### Client.send(command, **kwargs) - -Send a command to the server. See [`Supported Commands`](#supported-commands) for a detailed breakdown of available commands and their parameters. - -# Supported Commands - -These commands can be sent to the server using [`Client.send`](#clientsendcommand-kwargs). - -For incoming signals and messages, see [`Supported Events`](#supported-events) below. - -#### Documentation Layout -There are three parts to each command's documentation: - -1. **Python syntax** - sample calls using available parameters -2. **Normalized IRC wire format** - the normalized translation from python keywords to a literal string that will be constructed by the client and sent to the server. 
The following syntax is used: - * `<parameter>` the location of the `parameter` passed to `send`. Literal `<>` are not transferred. - * `[value]` an optional value, which may be excluded. In some cases, such as [`LINKS`](#links), an optional value may only be provided if another dependent value is present. Literal `[]` are not transferred. - * `:` the start of a field which may contain spaces. This is always the last field of an IRC line. - * `"value"` literal value as printed. Literal `""` are not transferred. -3. **Notes** - additional options or restrictions on commands that do not fit a pre-defined convention. Common notes include keywords for ease of searching: - * `RFC_DELTA` - Some commands have different parameters from their RFC2812 definitions. **Please pay attention to these notes, since they are the most likely to cause issues**. These changes can include: - * Addition of new required or optional parameters - * Default values for new or existing parameters - * `CONDITIONAL_OPTION` - there are some commands whose values depend on each other. For example, [`LINKS`](#links), `<remote>` REQUIRES `<mask>`. - * `MULTIPLE_VALUES` - Some commands can handle non-string iterables, such as [`WHOWAS`](#whowas) where `<nick>` can handle both `"WiZ"` and `["WiZ", "WiZ-friend"]`. - * `PARAM_RENAME` - Some commands have renamed parameters from their RFC2812 specification to improve consistency. 
- -## Local Events -*(trigger only)* - -#### CLIENT_CONNECT -```python -yield from client.trigger('CLIENT_CONNECT', host='localhost', port=6697) -``` -#### CLIENT_DISCONNECT -```python -yield from client.trigger('CLIENT_DISCONNECT', host='localhost', port=6697) -``` - -## Connection Registration -#### [PASS] -```python -client.send('PASS', password='hunter2') -``` - - PASS <password> - -#### [NICK] -```python -client.send('nick', nick='WiZ') -``` - - NICK <nick> - -* PARAM_RENAME `nickname -> nick` - -#### [USER] -```python -client.send('USER', user='WiZ-user', realname='Ronnie') -client.send('USER', user='WiZ-user', mode='8', realname='Ronnie') -``` - - USER <user> [<mode>] :<realname> - -* RFC_DELTA `mode` is optional - default is `0` - -#### [OPER] -```python -client.send('OPER', user='WiZ', password='hunter2') -``` - - OPER <user> <password> - -* PARAM_RENAME `name -> user` - -#### [USERMODE][USERMODE] (renamed from [MODE][USERMODE]) -```python -client.send('USERMODE', nick='WiZ') -client.send('USERMODE', nick='WiZ', modes='+io') -``` - - MODE <nick> [<modes>] - -* RFC_DELTA rfc did not name `modes` parameter - -#### [SERVICE] -```python -client.send('SERVICE', nick='CHANSERV', distribution='*.en', - type='0', info='manages channels') -``` - - SERVICE <nick> <distribution> <type> :<info> - -* PARAM_RENAME `nickname -> nick` - -#### [QUIT] -```python -client.send('QUIT') -client.send('QUIT', message='Gone to Lunch') -``` - - QUIT :[<message>] - -* PARAM_RENAME `Quit Message -> message` - -#### [SQUIT] -```python -client.send('SQUIT', server='tolsun.oulu.fi') -client.send('SQUIT', server='tolsun.oulu.fi', message='Bad Link') -``` - - SQUIT <server> :[<message>] - -* PARAM_RENAME `Comment -> message` -* RFC_DELTA `message` is optional - rfc says comment SHOULD be supplied; syntax shows required - -## Channel Operations - -#### [JOIN] -```python -client.send('JOIN', channel='0') # send PART to all joined channels -client.send('JOIN', channel='#foo-chan') 
-client.send('JOIN', channel='#foo-chan', key='foo-key') -client.send('JOIN', channel=['#foo-chan', '#other'], key='key-for-both') -client.send('JOIN', channel=['#foo-chan', '#other'], key=['foo-key', 'other-key']) -``` - - JOIN <channel> [<key>] - -* MULTIPLE_VALUES `channel` and `key` -* If `channel` has n > 1 values, `key` MUST have 1 or n values - -#### [PART] -```python -client.send('PART', channel='#foo-chan') -client.send('PART', channel=['#foo-chan', '#other']) -client.send('PART', channel='#foo-chan', message='I lost') -``` - - PART <channel> :[<message>] - -* MULTIPLE_VALUES `channel` - -#### [CHANNELMODE][CHANNELMODE] (renamed from [MODE][CHANNELMODE]) -```python -client.send('CHANNELMODE', channel='#foo-chan', modes='+b') -client.send('CHANNELMODE', channel='#foo-chan', modes='+l', params='10') -``` - - MODE <channel> <modes> [<params>] - -* PARAM_RENAME `modeparams -> params` - -#### [TOPIC] -```python -client.send('TOPIC', channel='#foo-chan') -client.send('TOPIC', channel='#foo-chan', message='') # Clear channel message -client.send('TOPIC', channel='#foo-chan', message='Yes, this is dog') -``` - - TOPIC <channel> :[<message>] - -* PARAM_RENAME `topic -> message` - -#### [NAMES] -```python -client.send('NAMES') -client.send('NAMES', channel='#foo-chan') -client.send('NAMES', channel=['#foo-chan', '#other']) -client.send('NAMES', channel=['#foo-chan', '#other'], target='remote.*.edu') -``` - - NAMES [<channel>] [<target>] - -* MULTIPLE_VALUES `channel` -* CONDITIONAL_OPTION `target` requires `channel` - -#### [LIST] -```python -client.send('LIST') -client.send('LIST', channel='#foo-chan') -client.send('LIST', channel=['#foo-chan', '#other']) -client.send('LIST', channel=['#foo-chan', '#other'], target='remote.*.edu') -``` - - LIST [<channel>] [<target>] - -* MULTIPLE_VALUES `channel` -* CONDITIONAL_OPTION `target` requires `channel` - -#### [INVITE] -```python -client.send('INVITE', nick='WiZ-friend', channel='#bar-chan') -``` - - INVITE <nick> 
<channel> - -* PARAM_RENAME `nickname -> nick` - -#### [KICK] -```python -client.send('KICK', channel='#foo-chan', nick='WiZ') -client.send('KICK', channel='#foo-chan', nick='WiZ', message='Spamming') -client.send('KICK', channel='#foo-chan', nick=['WiZ', 'WiZ-friend']) -client.send('KICK', channel=['#foo', '#bar'], nick=['WiZ', 'WiZ-friend']) -``` - - KICK <channel> <nick> :[<message>] - -* PARAM_RENAME `nickname -> nick` -* PARAM_RENAME `comment -> message` -* MULTIPLE_VALUES `channel` and `nick` -* If `nick` has n > 1 values, channel MUST have 1 or n values -* `channel` can have n > 1 values IFF `nick` has n values - -## Sending Messages -#### [PRIVMSG] -```python -client.send('PRIVMSG', target='WiZ-friend', message='Hello, friend!') -``` - - PRIVMSG <target> :<message> - -* PARAM_RENAME `msgtarget -> target` -* PARAM_RENAME `text to be sent -> message` - -#### [NOTICE] -```python -client.send('NOTICE', target='#foo-chan', message='Maintenance in 5 mins') -``` - - NOTICE <target> :<message> - -* PARAM_RENAME `msgtarget -> target` -* PARAM_RENAME `text -> message` - -## Server Queries and Commands -#### [MOTD] -```python -client.send('MOTD') -client.send('MOTD', target='remote.*.edu') -``` - - MOTD [<target>] - -#### [LUSERS] -```python -client.send('LUSERS') -client.send('LUSERS', mask='*.edu') -client.send('LUSERS', mask='*.edu', target='remote.*.edu') -``` - - LUSERS [<mask>] [<target>] - -* CONDITIONAL_OPTION `target` requires `mask` - -#### [VERSION] -```python -client.send('VERSION') -``` - - VERSION [<target>] - -#### [STATS] -```python -client.send('STATS') -client.send('STATS', query='m') -client.send('STATS', query='m', target='remote.*.edu') -``` - - STATS [<query>] [<target>] - -* CONDITIONAL_OPTION `target` requires `query` - -#### [LINKS] -```python -client.send('LINKS') -client.send('LINKS', mask='*.bu.edu') -client.send('LINKS', remote='*.edu', mask='*.bu.edu') -``` - - LINKS [<remote>] [<mask>] - -* PARAM_RENAME `remote server -> remote` -* 
PARAM_RENAME `server mask -> mask` -* CONDITIONAL_OPTION `remote` requires `mask` - -#### [TIME] -```python -client.send('TIME') -client.send('TIME', target='remote.*.edu') -``` - - TIME [<target>] - -#### [CONNECT] -```python -client.send('CONNECT', target='tolsun.oulu.fi', port=6667) -client.send('CONNECT', target='tolsun.oulu.fi', port=6667, remote='*.edu') -``` - - CONNECT <target> <port> [<remote>] - -* PARAM_RENAME `target server -> target` -* PARAM_RENAME `remote server -> remote` - -#### [TRACE] -```python -client.send('TRACE') -client.send('TRACE', target='remote.*.edu') -``` - - TRACE [<target>] - -#### [ADMIN] -```python -client.send('ADMIN') -client.send('ADMIN', target='remote.*.edu') -``` - - ADMIN [<target>] - -#### [INFO] -```python -client.send('INFO') -client.send('INFO', target='remote.*.edu') -``` - - INFO [<target>] - -## Service Query and Commands -#### [SERVLIST] -```python -client.send('SERVLIST', mask='*SERV') -client.send('SERVLIST', mask='*SERV', type=3) -``` - - SERVLIST [<mask>] [<type>] - -* CONDITIONAL_OPTION `type` requires `mask` - -#### [SQUERY] -```python -client.send('SQUERY', target='irchelp', message='HELP privmsg') -``` - - SQUERY <target> :<message> - -* PARAM_RENAME `servicename -> target` -* PARAM_RENAME `text -> message` - -## User Based Queries -#### [WHO] -```python -client.send('WHO') -client.send('WHO', mask='*.fi') -client.send('WHO', mask='*.fi', o=True) -``` - - WHO [<mask>] ["o"] - -* Optional positional parameter "o" is included if the kwarg "o" is Truthy - -#### [WHOIS] -```python -client.send('WHOIS', mask='*.fi') -client.send('WHOIS', mask=['*.fi', '*.edu'], target='remote.*.edu') -``` - - WHOIS <mask> [<target>] - -* MULTIPLE_VALUES `mask` - -#### [WHOWAS] -```python -client.send('WHOWAS', nick='WiZ') -client.send('WHOWAS', nick='WiZ', count=10) -client.send('WHOWAS', nick=['WiZ', 'WiZ-friend'], count=10) -client.send('WHOWAS', nick='WiZ', count=10, target='remote.*.edu') -``` - - WHOWAS <nick> [<count>] 
[<target>] - -* PARAM_RENAME `nickname -> nick` -* MULTIPLE_VALUES `nick` -* CONDITIONAL_OPTION `target` requires `count` - -## Miscellaneous Messages -#### [KILL] -```python -client.send('KILL', nick='WiZ', message='Spamming Joins') -``` - - KILL <nick> :<message> - -* PARAM_RENAME `nickname -> nick` -* PARAM_RENAME `comment -> message` - -#### [PING] -```python -client.send('PING', message='Test..') -client.send('PING', server2='tolsun.oulu.fi') -client.send('PING', server1='WiZ', server2='tolsun.oulu.fi') -``` - - PING [<server1>] [<server2>] :[<message>] - -* RFC_DELTA `server1` is optional -* RFC_DELTA `message` is new, and optional -* CONDITIONAL_OPTION `server2` requires `server1` - -#### [PONG] -```python -client.send('PONG', message='Test..') -client.send('PONG', server2='tolsun.oulu.fi') -client.send('PONG', server1='WiZ', server2='tolsun.oulu.fi') -``` - - PONG [<server1>] [<server2>] :[<message>] - -* RFC_DELTA `server1` is optional -* RFC_DELTA `message` is new, and optional -* CONDITIONAL_OPTION `server2` requires `server1` - -## Optional Features -#### [AWAY] -```python -client.send('AWAY') -client.send('AWAY', message='Gone to Lunch') -``` - - AWAY :[<message>] - -* PARAM_RENAME `text -> message` - -#### [REHASH] -```python -client.send('REHASH') -``` - - REHASH - -#### [DIE] -```python -client.send('DIE') -``` - - DIE - -#### [RESTART] -```python -client.send('RESTART') -``` - - RESTART - -#### [SUMMON] -```python -client.send('SUMMON', nick='WiZ') -client.send('SUMMON', nick='WiZ', target='remote.*.edu') -client.send('SUMMON', nick='WiZ', target='remote.*.edu', channel='#foo-chan') -``` - - SUMMON <nick> [<target>] [<channel>] - -* PARAM_RENAME `user -> nick` -* CONDITIONAL_OPTION `channel` requires `target` - -#### [USERS] -```python -client.send('USERS') -client.send('USERS', target='remote.*.edu') -``` - - USERS [<target>] - -#### [WALLOPS] -```python -client.send('WALLOPS', message='Maintenance in 5 minutes') -``` - - WALLOPS :<message> - -* 
PARAM_RENAME `Text to be sent -> message` - -#### [USERHOST] -```python -client.send('USERHOST', nick='WiZ') -client.send('USERHOST', nick=['WiZ', 'WiZ-friend']) -``` - - USERHOST <nick> - -* PARAM_RENAME `nickname -> nick` -* MULTIPLE_VALUES `nick` - -#### [ISON] -```python -client.send('ISON', nick='WiZ') -client.send('ISON', nick=['WiZ', 'WiZ-friend']) -``` - - ISON <nick> - -* PARAM_RENAME `nickname -> nick` -* MULTIPLE_VALUES `nick` - -[PASS]: https://tools.ietf.org/html/rfc2812#section-3.1.1 -[NICK]: https://tools.ietf.org/html/rfc2812#section-3.1.2 -[USER]: https://tools.ietf.org/html/rfc2812#section-3.1.3 -[OPER]: https://tools.ietf.org/html/rfc2812#section-3.1.4 -[USERMODE]: https://tools.ietf.org/html/rfc2812#section-3.1.5 -[SERVICE]: https://tools.ietf.org/html/rfc2812#section-3.1.6 -[QUIT]: https://tools.ietf.org/html/rfc2812#section-3.1.7 -[SQUIT]: https://tools.ietf.org/html/rfc2812#section-3.1.8 - -[JOIN]: https://tools.ietf.org/html/rfc2812#section-3.2.1 -[PART]: https://tools.ietf.org/html/rfc2812#section-3.2.2 -[CHANNELMODE]: https://tools.ietf.org/html/rfc2812#section-3.2.3 -[TOPIC]: https://tools.ietf.org/html/rfc2812#section-3.2.4 -[NAMES]: https://tools.ietf.org/html/rfc2812#section-3.2.5 -[LIST]: https://tools.ietf.org/html/rfc2812#section-3.2.6 -[INVITE]: https://tools.ietf.org/html/rfc2812#section-3.2.7 -[KICK]: https://tools.ietf.org/html/rfc2812#section-3.2.8 - -[PRIVMSG]: https://tools.ietf.org/html/rfc2812#section-3.3.1 -[NOTICE]: https://tools.ietf.org/html/rfc2812#section-3.3.2 - -[MOTD]: https://tools.ietf.org/html/rfc2812#section-3.4.1 -[LUSERS]: https://tools.ietf.org/html/rfc2812#section-3.4.2 -[VERSION]: https://tools.ietf.org/html/rfc2812#section-3.4.3 -[STATS]: https://tools.ietf.org/html/rfc2812#section-3.4.4 -[LINKS]: https://tools.ietf.org/html/rfc2812#section-3.4.5 -[TIME]: https://tools.ietf.org/html/rfc2812#section-3.4.6 -[CONNECT]: https://tools.ietf.org/html/rfc2812#section-3.4.7 -[TRACE]: 
https://tools.ietf.org/html/rfc2812#section-3.4.8 -[ADMIN]: https://tools.ietf.org/html/rfc2812#section-3.4.9 -[INFO]: https://tools.ietf.org/html/rfc2812#section-3.4.10 - -[SERVLIST]: https://tools.ietf.org/html/rfc2812#section-3.5.1 -[SQUERY]: https://tools.ietf.org/html/rfc2812#section-3.5.2 - -[WHO]: https://tools.ietf.org/html/rfc2812#section-3.6.1 -[WHOIS]: https://tools.ietf.org/html/rfc2812#section-3.6.2 -[WHOWAS]: https://tools.ietf.org/html/rfc2812#section-3.6.3 - -[KILL]: https://tools.ietf.org/html/rfc2812#section-3.7.1 -[PING]: https://tools.ietf.org/html/rfc2812#section-3.7.2 -[PONG]: https://tools.ietf.org/html/rfc2812#section-3.7.3 - -[AWAY]: https://tools.ietf.org/html/rfc2812#section-4.1 -[REHASH]: https://tools.ietf.org/html/rfc2812#section-4.2 -[DIE]: https://tools.ietf.org/html/rfc2812#section-4.3 -[RESTART]: https://tools.ietf.org/html/rfc2812#section-4.4 -[SUMMON]: https://tools.ietf.org/html/rfc2812#section-4.5 -[USERS]: https://tools.ietf.org/html/rfc2812#section-4.6 -[WALLOPS]: https://tools.ietf.org/html/rfc2812#section-4.7 -[USERHOST]: https://tools.ietf.org/html/rfc2812#section-4.8 -[ISON]: https://tools.ietf.org/html/rfc2812#section-4.9 - - -# Supported Events - -These commands are received from the server, or dispatched using `Client.trigger(...)`. - -For sending commands, see [`Supported Commands`](#supported-commands) above. - -* PING -* JOIN -* PART -* PRIVMSG -* NOTICE -* RPL_WELCOME (001) -* RPL_YOURHOST (002) -* RPL_CREATED (003) -* RPL_MYINFO (004) -* RPL_BOUNCE (005) -* RPL_MOTDSTART (375) -* RPL_MOTD (372) -* RPL_ENDOFMOTD (376) -* RPL_LUSERCLIENT (251) -* RPL_LUSERME (255) -* RPL_LUSEROP (252) -* RPL_LUSERUNKNOWN (253) -* RPL_LUSERCHANNELS (254) diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..8331488 --- /dev/null +++ b/README.rst @@ -0,0 +1,524 @@ +.. image:: https://img.shields.io/travis/numberoverzero/bottom/master.svg?style=flat-square + :target: https://travis-ci.org/numberoverzero/bottom +.. 
image:: https://img.shields.io/coveralls/numberoverzero/bottom/master.svg?style=flat-square + :target: https://coveralls.io/github/numberoverzero/bottom +.. image:: https://img.shields.io/pypi/v/bottom.svg?style=flat-square + :target: https://pypi.python.org/pypi/bottom +.. image:: https://img.shields.io/github/issues-raw/numberoverzero/bottom.svg?style=flat-square + :target: https://github.com/numberoverzero/bottom/issues +.. image:: https://img.shields.io/pypi/l/bottom.svg?style=flat-square + :target: https://github.com/numberoverzero/bottom/blob/master/LICENSE + +asyncio-based rfc2812-compliant IRC Client (3.5+) + +Installation +============ +:: + + pip install bottom + +Getting Started +=============== + +bottom isn't a kitchen-sink library. Instead, it provides a consistent API with a small surface area, tuned for performance and ease of extension. Similar to the routing style of bottle.py, hooking into events is one line. + +:: + + import bottom + import asyncio + + NICK = 'bottom-bot' + CHANNEL = '#python' + + bot = bottom.Client('localhost', 6697) + + + @bot.on('CLIENT_CONNECT') + def connect(): + bot.send('NICK', nick=NICK) + bot.send('USER', user=NICK, realname='Bot using bottom.py') + bot.send('JOIN', channel=CHANNEL) + + + @bot.on('PING') + def keepalive(message): + bot.send('PONG', message=message) + + + @bot.on('PRIVMSG') + def message(nick, target, message): + ''' Echo all messages ''' + + # Don't echo ourselves + if nick == NICK: + return + # Direct message to bot + if target == NICK: + bot.send("PRIVMSG", target=nick, message=message) + # Message in channel + else: + bot.send("PRIVMSG", target=target, message=message) + + asyncio.get_event_loop().run_until_complete(bot.run()) + +Versioning and RFC2812 +======================= + +* Bottom follows semver for its **public** API. + + * Currently, ``Client`` is the only public member of bottom. 
+ * IRC replies/codes which are not yet implemented may be added at any time, and will correspond to a patch - the function contract of ``@on`` method does not change. + * You should not rely on the internal api staying the same between minor versions. + * Over time, private apis may be raised to become public. The reverse will never occur. + +* There are a number of changes from RFC2812 - none should noticeably change how you interact with a standard IRC server. For specific adjustments, see the notes above each command in supported_commands_. + +Contributing +============ + +Contributions welcome! When reporting issues, please provide enough detail to reproduce the bug - sample code is ideal. When submitting a PR, please make sure ``tox`` passes (including flake8). + +Development +----------- + +bottom uses ``tox``, ``pytest`` and ``flake8``. To get everything set up:: + + # RECOMMENDED: create a virtualenv with: + # mkvirtualenv bottom + git clone https://github.com/numberoverzero/bottom.git + pip install tox + tox + + +TODO +---- + +* Better `Client` docstrings +* Add missing replies/errors to `unpack.py:unpack_command` + + * Add reply/error parameters to `unpack.py:parameters` + * Document supported_events_ + + +Contributors +------------ +* `fahhem <https://github.com/fahhem>`_ +* `thebigmunch <https://github.com/thebigmunch>`_ +* `tilal6991 <https://github.com/tilal6991>`_ +* `AMorporkian <https://github.com/AMorporkian>`_ + +API +=== + +Client.run() +------------ + +*This is a coroutine.* + +Start the magic. This will connect the client, and then read until it disconnects. The ``CLIENT_DISCONNECT`` event will fire before the loop exits, allowing you to ``await Client.connect()`` and keep the client running. + +If you want to call this synchronously (block until it's complete) use the following:: + + import asyncio + # ... 
client is defined somewhere + + loop = asyncio.get_event_loop() + task = client.run() + loop.run_until_complete(task) + + +Client.on(event)(func) +---------------------- + +This ``@decorator`` is the main way you'll interact with a ``Client``. It takes a string, returning a function wrapper that validates the function and registers it for the given event. When that event occurs, the function will be called, mapping any arguments the function may expect from the set of available arguments for the event. + +Not all available arguments need to be used. For instance, both of the following are valid:: + + @bot.on('PRIVMSG') + def event(nick, message, target): + ''' Doesn't use user, host. argument order is different ''' + # message sent to bot - echo message + if target == bot.nick: + bot.send('PRIVMSG', target, message=message) + # Some channel we're watching + elif target == bot.monitored_channel: + logger.info("{} -> {}: {}".format(nick, target, message)) + + + @bot.on('PRIVMSG') + def func(message, target): + ''' Just waiting for the signal ''' + if message == codeword and target == secret_channel: + execute_heist() + + +VAR_KWARGS can be used, as long as the name doesn't mask an actual parameter. VAR_ARGS may not be used. + +:: + + # OK - kwargs, no masking + @bot.on('PRIVMSG') + def event(message, **everything_else): + logger.log(everything_else['nick'] + " said " + message) + + + # NOT OK - kwargs, masking parameter <nick> + @bot.on('PRIVMSG') + def event(message, **nick): + logger.log(nick['target']) + + + # NOT OK - uses VAR_ARGS + @bot.on('PRIVMSG') + def event(message, *args): + logger.log(args) + + +Decorated functions will be invoked asynchronously, and may optionally use the ``await`` syntax. Functions do not need to be wrapped with ``@asyncio.coroutine`` - this is handled as part of the function caching process. + +Client.trigger(event, \*\*kwargs) +------------------------------- + +Manually inject a command or reply as if it came from the server. 
This is useful for invoking other handlers. +Note that because trigger doesn't block, registered callbacks for the event won't run until +the event loop yields to them. + +:: + + # Manually trigger `PRIVMSG` handlers: + bot.trigger('privmsg', nick="always_says_no", message="yes") + +:: + + # Rename !commands to !help + @bot.on('privmsg') + def parse(nick, target, message): + if message == '!commands': + bot.send('privmsg', target=nick, + message="!commands was renamed to !help in 1.2") + # Don't make them retype it, just make it happen + bot.trigger('privmsg', nick=nick, + target=target, message="!help") + +:: + + # While testing the auto-reconnect module, simulate a disconnect: + def test_reconnect(bot): + bot.trigger("client_disconnect") + # Clear out the pending callbacks + bot.loop.run_until_complete(asyncio.sleep(0, loop=bot.loop)) + assert bot.connected + +Client.connect() +---------------- + +*This is a coroutine.* + +Attempt to reconnect using the client's host, port:: + + @bot.on('client_disconnect') + async def reconnect(): + # Wait a few seconds + await asyncio.sleep(3) + await bot.connect() + + +Client.disconnect() +------------------- + +*This is a coroutine.* + +Disconnect from the server if connected:: + + @bot.on('privmsg') + async def suicide_pill(nick, message): + if nick == "spy_handler" and message == "last stop": + await bot.disconnect() + +Client.send(command, \*\*kwargs) +------------------------------ + +Send a command to the server. + +.. 
_supported_commands: + +Supported Commands +================== + +:: + + client.send('PASS', password='hunter2') + +:: + + client.send('NICK', nick='WiZ') + +:: + + # mode is optional, default is 0 + client.send('USER', user='WiZ-user', realname='Ronnie') + client.send('USER', user='WiZ-user', mode='8', realname='Ronnie') + +:: + + client.send('OPER', user='WiZ', password='hunter2') + +:: + + # Renamed from MODE + client.send('USERMODE', nick='WiZ') + client.send('USERMODE', nick='WiZ', modes='+io') + +:: + + client.send('SERVICE', nick='CHANSERV', distribution='*.en', + type='0', info='manages channels') + +:: + + client.send('QUIT') + client.send('QUIT', message='Gone to Lunch') + +:: + + client.send('SQUIT', server='tolsun.oulu.fi') + client.send('SQUIT', server='tolsun.oulu.fi', message='Bad Link') + +:: + + # If channel has n > 1 values, key MUST have 1 or n values + client.send('JOIN', channel='0') # send PART to all joined channels + client.send('JOIN', channel='#foo-chan') + client.send('JOIN', channel='#foo-chan', key='foo-key') + client.send('JOIN', channel=['#foo-chan', '#other'], key='key-for-both') + client.send('JOIN', channel=['#foo-chan', '#other'], key=['foo-key', 'other-key']) + +:: + + client.send('PART', channel='#foo-chan') + client.send('PART', channel=['#foo-chan', '#other']) + client.send('PART', channel='#foo-chan', message='I lost') + +:: + + # Renamed from MODE + client.send('CHANNELMODE', channel='#foo-chan', modes='+b') + client.send('CHANNELMODE', channel='#foo-chan', modes='+l', params='10') + +:: + + client.send('TOPIC', channel='#foo-chan') + client.send('TOPIC', channel='#foo-chan', message='') # Clear channel message + client.send('TOPIC', channel='#foo-chan', message='Yes, this is dog') + +:: + + # target requires channel + client.send('NAMES') + client.send('NAMES', channel='#foo-chan') + client.send('NAMES', channel=['#foo-chan', '#other']) + client.send('NAMES', channel=['#foo-chan', '#other'], target='remote.*.edu') + +:: + + 
# target requires channel + client.send('LIST') + client.send('LIST', channel='#foo-chan') + client.send('LIST', channel=['#foo-chan', '#other']) + client.send('LIST', channel=['#foo-chan', '#other'], target='remote.*.edu') + +:: + + client.send('INVITE', nick='WiZ-friend', channel='#bar-chan') + +:: + + # nick and channel must have the same number of elements + client.send('KICK', channel='#foo-chan', nick='WiZ') + client.send('KICK', channel='#foo-chan', nick='WiZ', message='Spamming') + client.send('KICK', channel='#foo-chan', nick=['WiZ', 'WiZ-friend']) + client.send('KICK', channel=['#foo', '#bar'], nick=['WiZ', 'WiZ-friend']) + +:: + + client.send('PRIVMSG', target='WiZ-friend', message='Hello, friend!') + +:: + + client.send('NOTICE', target='#foo-chan', message='Maintenance in 5 mins') + +:: + + client.send('MOTD') + client.send('MOTD', target='remote.*.edu') + +:: + + client.send('LUSERS') + client.send('LUSERS', mask='*.edu') + client.send('LUSERS', mask='*.edu', target='remote.*.edu') + +:: + + client.send('VERSION') + +:: + + # target requires query + client.send('STATS') + client.send('STATS', query='m') + client.send('STATS', query='m', target='remote.*.edu') + +:: + + # remote requires mask + client.send('LINKS') + client.send('LINKS', mask='*.bu.edu') + client.send('LINKS', remote='*.edu', mask='*.bu.edu') + +:: + + client.send('TIME') + client.send('TIME', target='remote.*.edu') + +:: + + client.send('CONNECT', target='tolsun.oulu.fi', port=6667) + client.send('CONNECT', target='tolsun.oulu.fi', port=6667, remote='*.edu') + +:: + + client.send('TRACE') + client.send('TRACE', target='remote.*.edu') + +:: + + client.send('ADMIN') + client.send('ADMIN', target='remote.*.edu') + +:: + + client.send('INFO') + client.send('INFO', target='remote.*.edu') + +:: + + # type requires mask + client.send('SERVLIST', mask='*SERV') + client.send('SERVLIST', mask='*SERV', type=3) + +:: + + client.send('SQUERY', target='irchelp', message='HELP privmsg') + +:: + + 
client.send('WHO') + client.send('WHO', mask='*.fi') + client.send('WHO', mask='*.fi', o=True) + +:: + + client.send('WHOIS', mask='*.fi') + client.send('WHOIS', mask=['*.fi', '*.edu'], target='remote.*.edu') + +:: + + # target requires count + client.send('WHOWAS', nick='WiZ') + client.send('WHOWAS', nick='WiZ', count=10) + client.send('WHOWAS', nick=['WiZ', 'WiZ-friend'], count=10) + client.send('WHOWAS', nick='WiZ', count=10, target='remote.*.edu') + +:: + + client.send('KILL', nick='WiZ', message='Spamming Joins') + +:: + + # server2 requires server1 + client.send('PING', message='Test..') + client.send('PING', server2='tolsun.oulu.fi') + client.send('PING', server1='WiZ', server2='tolsun.oulu.fi') + +:: + + # server2 requires server1 + client.send('PONG', message='Test..') + client.send('PONG', server2='tolsun.oulu.fi') + client.send('PONG', server1='WiZ', server2='tolsun.oulu.fi') + +:: + + client.send('AWAY') + client.send('AWAY', message='Gone to Lunch') + +:: + + client.send('REHASH') + +:: + + client.send('DIE') + +:: + + client.send('RESTART') + +:: + + # target requires channel + client.send('SUMMON', nick='WiZ') + client.send('SUMMON', nick='WiZ', target='remote.*.edu') + client.send('SUMMON', nick='WiZ', target='remote.*.edu', channel='#foo-chan') + +:: + + client.send('USERS') + client.send('USERS', target='remote.*.edu') + +:: + + client.send('WALLOPS', message='Maintenance in 5 minutes') + +:: + + client.send('USERHOST', nick='WiZ') + client.send('USERHOST', nick=['WiZ', 'WiZ-friend']) + +:: + + client.send('ISON', nick='WiZ') + client.send('ISON', nick=['WiZ', 'WiZ-friend']) + +.. _supported_events: + +Supported Events +================ + +These commands are received from the server, or dispatched using ``Client.trigger(...)``. 
+ +:: + + # Local only events + client.trigger('CLIENT_CONNECT', host='localhost', port=6697) + client.trigger('CLIENT_DISCONNECT', host='localhost', port=6697) + +* PING +* JOIN +* PART +* PRIVMSG +* NOTICE +* RPL_WELCOME (001) +* RPL_YOURHOST (002) +* RPL_CREATED (003) +* RPL_MYINFO (004) +* RPL_BOUNCE (005) +* RPL_MOTDSTART (375) +* RPL_MOTD (372) +* RPL_ENDOFMOTD (376) +* RPL_LUSERCLIENT (251) +* RPL_LUSERME (255) +* RPL_LUSEROP (252) +* RPL_LUSERUNKNOWN (253) +* RPL_LUSERCHANNELS (254) diff --git a/bottom/__init__.py b/bottom/__init__.py index 90729cf..5974c2e 100644 --- a/bottom/__init__.py +++ b/bottom/__init__.py @@ -1,25 +1,22 @@ """ asyncio-based rfc2812-compliant IRC Client """ -import logging import asyncio from . import connection from . import event from . import pack from . import unpack __all__ = ["Client"] -logger = logging.getLogger(__name__) +__version__ = "1.0.0" class Client(event.EventsMixin): - __conn_cls__ = connection.Connection + __conn_cls = connection.Connection - def __init__(self, host, port, encoding='UTF-8', ssl=True): - # It's ok that unpack.parameters isn't cached, since it's only - # called when adding an event handler (which should __usually__ - # only occur during setup) - super().__init__(unpack.parameters) - # trigger events on the client - self.connection = self.__conn_cls__(host, port, self, - encoding=encoding, ssl=ssl) + def __init__(self, host, port, *, encoding='UTF-8', ssl=True, loop=None): + if loop is None: + loop = asyncio.get_event_loop() + super().__init__(unpack.parameters, loop=loop) + self.connection = self.__conn_cls(host, port, self, ssl=ssl, + encoding=encoding, loop=loop) def send(self, command, **kwargs): ''' @@ -34,22 +31,19 @@ class Client(event.EventsMixin): packed_command = pack.pack_command(command, **kwargs) self.connection.send(packed_command) - @asyncio.coroutine - def connect(self): - yield from self.connection.connect() + async def connect(self): + await self.connection.connect() - 
@asyncio.coroutine - def disconnect(self): - yield from self.connection.disconnect() + async def disconnect(self): + await self.connection.disconnect() @property def connected(self): return self.connection.connected - @asyncio.coroutine - def run(self, loop=None): + async def run(self): ''' Run the client until it disconnects (without reconnecting) ''' - yield from self.connection.run(loop=loop) + await self.connection.run() def on(self, command): ''' diff --git a/bottom/connection.py b/bottom/connection.py index 72434fa..f50b2f3 100644 --- a/bottom/connection.py +++ b/bottom/connection.py @@ -3,65 +3,61 @@ from . import unpack class Connection(object): - def __init__(self, host, port, events, encoding, ssl): + def __init__(self, host, port, events, encoding, ssl, *, loop): self.events = events self._connected = False self.host, self.port = host, port self.reader, self.writer = None, None self.encoding = encoding self.ssl = ssl + self.loop = loop - @asyncio.coroutine - def connect(self, loop=None): + async def connect(self): if self.connected: return - self.reader, self.writer = yield from asyncio.open_connection( - self.host, self.port, ssl=self.ssl, loop=loop) + self.reader, self.writer = await asyncio.open_connection( + self.host, self.port, ssl=self.ssl, loop=self.loop) self._connected = True - yield from self.events.trigger( - "CLIENT_CONNECT", host=self.host, port=self.port) + self.events.trigger("CLIENT_CONNECT", host=self.host, port=self.port) - @asyncio.coroutine - def disconnect(self): + async def disconnect(self): if not self.connected: return self.writer.close() self.writer = None self.reader = None self._connected = False - yield from self.events.trigger( + self.events.trigger( "CLIENT_DISCONNECT", host=self.host, port=self.port) @property def connected(self): return self._connected - @asyncio.coroutine - def run(self, loop=None): - yield from self.connect(loop=loop) + async def run(self): + await self.connect() while self.connected: - msg = yield from 
self.read() + msg = await self.read() if msg: try: event, kwargs = unpack.unpack_command(msg) except ValueError: print("PARSE ERROR {}".format(msg)) else: - yield from self.events.trigger(event, **kwargs) + self.events.trigger(event, **kwargs) else: # Lost connection - yield from self.disconnect() + await self.disconnect() def send(self, msg): if self.writer: self.writer.write((msg.strip() + '\n').encode(self.encoding)) - @asyncio.coroutine - def read(self): + async def read(self): if not self.reader: return '' try: - msg = yield from self.reader.readline() + msg = await self.reader.readline() return msg.decode(self.encoding, 'ignore').strip() except EOFError: return '' diff --git a/bottom/event.py b/bottom/event.py index 79370da..f94cbf1 100644 --- a/bottom/event.py +++ b/bottom/event.py @@ -5,7 +5,7 @@ missing = object() class EventsMixin(object): - def __init__(self, getparams): + def __init__(self, getparams, *, loop): ''' getparams is a function that takes a single argument (event) and returns a list of parameters for the event. It should raise on unknown @@ -15,28 +15,25 @@ class EventsMixin(object): # where event is a string, and list(func) is the list of functions # (wrapped and decorated) that will be invoked when the given event # is triggered. - self.__partials__ = collections.defaultdict(list) - self.__getparams__ = getparams + self._partials = collections.defaultdict(list) + self._getparams = getparams + self.loop = loop - def __add_event__(self, event, func): + def _add_event(self, event, func): ''' Validate the func's signature, then partial_bind the function to speed up argument injection. 
''' - parameters = self.__getparams__(event) + parameters = self._getparams(event) validate_func(event, func, parameters) - self.__partials__[event].append(partial_bind(func)) + self._partials[event].append(partial_bind(func)) return func - @asyncio.coroutine def trigger(self, event, **kwargs): - ''' This is a coroutine so that we can `yield from` its execution ''' - partials = self.__partials__[event] - tasks = [func(**kwargs) for func in partials] - if not tasks: - return - yield from asyncio.wait(tasks) + partials = self._partials[event] + for func in partials: + self.loop.create_task(func(**kwargs)) def on(self, event): ''' @@ -60,12 +57,15 @@ class EventsMixin(object): event = 'test' kwargs = {'one': 1, 'two': 2, 'arg': 'arg'} + events.trigger(event, **kwargs) loop = asyncio.get_event_loop() - loop.run_until_complete(events.trigger(event, **kwargs)) + # Run all queued events + loop.stop() + loop.run_forever() ''' def wrap_function(func): - self.__add_event__(event, func) + self._add_event(event, func) return func return wrap_function @@ -110,7 +110,7 @@ def validate_func(event, func, parameters): def partial_bind(func): sig = inspect.signature(func) - # Wrap non-coroutines so we can always `yield from func(**kw)` + # Wrap non-coroutines so we can always `await func(**kw)` if not asyncio.iscoroutinefunction(func): func = asyncio.coroutine(func) base = {} @@ -122,8 +122,7 @@ def partial_bind(func): else: base[key] = default - @asyncio.coroutine - def wrapper(**kwargs): + async def wrapper(**kwargs): unbound = base.copy() # Only map params this function expects for key in base: @@ -131,6 +130,6 @@ def partial_bind(func): if new_value is not missing: unbound[key] = new_value bound = sig.bind(**unbound) - yield from func(*bound.args, **bound.kwargs) + await func(*bound.args, **bound.kwargs) return wrapper diff --git a/bottom/plugins/router.py b/bottom/plugins/router.py index b345e25..cd367d5 100644 --- a/bottom/plugins/router.py +++ b/bottom/plugins/router.py @@ 
-33,13 +33,13 @@ class Router(object): self.routes = {} bot.on("PRIVMSG")(self.handle) - async def handle(self, nick, target, message): + def handle(self, nick, target, message): ''' bot callback entrance ''' for regex, (func, pattern) in self.routes.items(): match = regex.match(message) if match: fields = match.groupdict() - await func(nick, target, fields) + self.bot.loop.create_task(func(nick, target, fields)) def route(self, pattern, **kwargs): ''' diff --git a/bottom/unpack.py b/bottom/unpack.py index 9c481f2..d5ecc8d 100644 --- a/bottom/unpack.py +++ b/bottom/unpack.py @@ -1,7 +1,6 @@ """ Simplified support for rfc2812 """ # https://tools.ietf.org/html/rfc2812 import re -missing = object() RE_IRCLINE = re.compile( """ diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 8f19ef8..0000000 --- a/setup.cfg +++ /dev/null @@ -1,6 +0,0 @@ -[run] -source = bottom -branch = True - -[flake8] -exclude = .tox, dist, doc, build, *.egg diff --git a/setup.py b/setup.py index 34a90cc..f79ea10 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,13 @@ import os from setuptools import setup, find_packages HERE = os.path.abspath(os.path.dirname(__file__)) -README = open(os.path.join(HERE, 'README.markdown')).read() +README = open(os.path.join(HERE, 'README.rst')).read() + +def get_version(): + with open("bottom/__init__.py") as f: + for line in f: + if line.startswith("__version__"): + return eval(line.split("=")[-1]) REQUIREMENTS = [ 'simplex' @@ -20,7 +26,7 @@ TEST_REQUIREMENTS = [ if __name__ == "__main__": setup( name='bottom', - version='0.9.13', + version=get_version(), description="asyncio-based rfc2812-compliant IRC Client", long_description=README, classifiers=[ diff --git a/tox.ini b/tox.ini index 37f47e8..8dd916b 100644 --- a/tox.ini +++ b/tox.ini @@ -2,11 +2,10 @@ envlist = py35 [testenv] -deps = git+https://github.com/pytest-dev/pytest +deps = pytest flake8 - coverage>=4.0a1 + coverage commands = coverage run --branch --source=bottom -m py.test coverage 
report -m - # disabled until flake8 supports py3.5 - # flake8 bottom tests + flake8 bottom tests
Dispatch blocks, incorrect async After unpacking, the reading loop will [`yield from`](https://github.com/numberoverzero/bottom/blob/master/bottom/connection.py#L50) the event dispatch, which means it waits until all handlers run for the event, before continuing. Instead, `events.trigger` should be a non-blocking call. That is, it should push the event into a queue, then return execution to the caller. --- I'm working on [accordian](https://github.com/numberoverzero/accordian) to fix this - however, in trying to use the 3.5 syntax (`await` and `async` as keywords) I've run into other problems. Once I've finished testing the library, I'll integrate it into bottom. This will require 3.5+ to use bottom, but the migration from 3.4 (current requirement) to 3.5 is virtually painless (I haven't seen any reports of problems migrating).
numberoverzero/bottom
diff --git a/tests/conftest.py b/tests/conftest.py index 03eea58..3bf0c0f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,5 @@ +from bottom import Client +from bottom.connection import Connection from bottom.event import EventsMixin import pytest import asyncio @@ -5,38 +7,39 @@ import collections @pytest.fixture -def run(): +def loop(): ''' - Run a coro until it completes. - - Returns result from coro, if it produces one. + Keep things clean by using a new event loop ''' - def run_in_loop(coro): - # For more details on what's going on: - # https://docs.python.org/3/library/asyncio-task.html\ - # #example-future-with-run-until-complete - def capture_return(future): - ''' Push coro result into future for return ''' - result = yield from coro - future.set_result(result) - # Kick off the coro, wrapped in the future above - future = asyncio.Future() - asyncio.async(capture_return(future)) - - # Block until coro completes and dumps return in future - loop = asyncio.get_event_loop() - loop.run_until_complete(future) - - # Hand result back - return future.result() - return run_in_loop + loop = asyncio.new_event_loop() + loop.set_debug(True) + return loop @pytest.fixture -def loop(): - # TODO: fix to use a new event loop. 
Because the loop fix will require - # touching a lot of code, this is an easy way to get the build green again - return asyncio.new_event_loop() +def flush(loop): + """Run loop once, to execute any pending tasks""" + + async def sentinel(): + pass + + def _flush(): + loop.run_until_complete(sentinel()) + return _flush + + [email protected] +def schedule(loop): + def _schedule(*coros): + for coro in coros: + loop.create_task(coro) + return _schedule + + [email protected] +def connection(patch_connection, events, loop): + print("connection") + return Connection("host", "port", events, "UTF-8", True, loop=loop) @pytest.fixture @@ -45,9 +48,9 @@ def eventparams(): @pytest.fixture -def events(eventparams): +def events(eventparams, loop): ''' Return a no-op EventsMixin that tracks triggers ''' - return MockEvents(lambda e: eventparams[e]) + return MockEvents(lambda e: eventparams[e], loop=loop) @pytest.fixture @@ -60,6 +63,17 @@ def writer(): return MockStreamWriter() [email protected] +def client(patch_connection, loop): + ''' + Return a client with mocked out asyncio. + + Pulling in patch_connection here mocks out asyncio.open_connection, + so that we can use reader, writer, run in tests. 
+ ''' + return Client("host", "port", loop=loop) + + @pytest.fixture def patch_connection(reader, writer, monkeypatch): ''' @@ -75,13 +89,13 @@ def patch_connection(reader, writer, monkeypatch): class MockEvents(EventsMixin): - def __init__(self, getparams): + def __init__(self, getparams, *, loop=None): self.triggered_events = collections.defaultdict(int) - super().__init__(getparams) + super().__init__(getparams, loop=loop) def trigger(self, event, **kwargs): self.triggered_events[event] += 1 - yield from super().trigger(event, **kwargs) + super().trigger(event, **kwargs) def triggered(self, event, n=1): ''' @@ -105,8 +119,7 @@ class MockStreamReader(): self.encoding = encoding self.used = False - @asyncio.coroutine - def readline(self): + async def readline(self): self.used = True try: line = self.lines.pop(0) diff --git a/tests/test_client.py b/tests/test_client.py index 1b38970..bfda26b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,36 +1,32 @@ -from bottom import Client +import asyncio import pytest +from bottom import Client [email protected] -def client(patch_connection, run): - ''' - Return a client with mocked out asyncio. - - Pulling in patch_connection here mocks out asyncio.open_connection, - so that we can use reader, writer, run in tests. 
- ''' - return Client("host", "port") +def test_default_event_loop(): + default_loop = asyncio.get_event_loop() + client = Client(host="host", port="port") + assert client.loop is default_loop -def test_send_unknown_command(client, run): +def test_send_unknown_command(client, loop): ''' Sending an unknown command raises ''' - run(client.connect()) + loop.run_until_complete(client.connect()) assert client.connected with pytest.raises(ValueError): client.send("Unknown_Command") -def test_send_before_connected(client, writer, run): +def test_send_before_connected(client, writer): ''' Sending before connected does not invoke writer ''' client.send("PONG") assert not writer.used -def test_send_after_disconnected(client, writer, run): +def test_send_after_disconnected(client, writer, loop): ''' Sending after disconnect does not invoke writer ''' - run(client.connect()) - run(client.disconnect()) + loop.run_until_complete(client.connect()) + loop.run_until_complete(client.disconnect()) client.send("PONG") assert not writer.used @@ -41,13 +37,13 @@ def test_on(client): @client.on('privmsg') def route(nick, target, message): pass - assert len(client.__partials__["PRIVMSG"]) == 1 + assert len(client._partials["PRIVMSG"]) == 1 with pytest.raises(ValueError): client.on("UNKNOWN_COMMAND")(route) -def test_run_(client, reader, eventparams, run): +def test_run_(client, reader, eventparams, loop): ''' run delegates to Connection, which triggers events on the Client ''' reader.push(":nick!user@host PRIVMSG #target :this is message") received = [] @@ -56,7 +52,7 @@ def test_run_(client, reader, eventparams, run): def receive(nick, user, host, target, message): received.extend([nick, user, host, target, message]) - run(client.run()) + loop.run_until_complete(client.run()) assert reader.has_read(":nick!user@host PRIVMSG #target :this is message") assert received == ["nick", "user", "host", "#target", "this is message"] diff --git a/tests/test_connection.py b/tests/test_connection.py 
index 6eb4cb3..cce800e 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -1,130 +1,115 @@ -from bottom.connection import Connection -import pytest - - [email protected] -def conn(patch_connection, events, run): - ''' Generic connection that is ready to read/send ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - assert conn.connected - return conn - - -def test_connect(patch_connection, writer, events, run): +def test_connect(connection, events, writer, schedule, flush): ''' Connection.Connect opens a writer, triggers CLIENT_CONNECT ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - assert conn.connected + schedule(connection.connect()) + flush() + assert connection.connected assert not writer.closed assert events.triggered("CLIENT_CONNECT") -def test_connect_already_connected(patch_connection, writer, events, run): +def test_already_connected(connection, events, writer, schedule, flush): ''' Does not trigger CLIENT_CONNECT multiple times ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - run(conn.connect()) + schedule(connection.connect(), connection.connect()) + flush() assert not writer.closed assert events.triggered("CLIENT_CONNECT") -def test_disconnect_before_connect(patch_connection, events, run): +def test_disconnect_before_connect(connection, events, schedule, flush): ''' disconnect before connect does nothing ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.disconnect()) - assert not conn.connected + schedule(connection.disconnect()) + flush() + assert not connection.connected assert not events.triggered("CLIENT_CONNECT") assert not events.triggered("CLIENT_DISCONNECT") -def test_disconnect(writer, patch_connection, events, run): +def test_disconnect(writer, patch_connection, events, connection, + schedule, flush): ''' Connection.disconnect closes writer, triggers CLIENT_DISCONNECT ''' - conn = 
Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - run(conn.disconnect()) - assert not conn.connected + schedule(connection.connect(), connection.disconnect()) + flush() + assert not connection.connected assert writer.closed - assert conn.writer is None + assert connection.writer is None assert events.triggered("CLIENT_CONNECT") assert events.triggered("CLIENT_DISCONNECT") -def test_disconnect_already_disconnected(patch_connection, events, run): +def test_already_disconnected(connection, events, schedule, flush): ''' Does not trigger CLIENT_DISCONNECT multiple times ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - run(conn.disconnect()) - run(conn.disconnect()) + schedule(connection.connect(), + connection.disconnect(), + connection.disconnect()) + flush() assert events.triggered("CLIENT_CONNECT") assert events.triggered("CLIENT_DISCONNECT") -def test_send_before_connected(patch_connection, writer, events, run): +def test_send_before_connected(connection, writer): ''' Nothing happens when sending before connecting ''' - conn = Connection("host", "port", events, "UTF-8", True) - assert not conn.connected - conn.send("test") + assert not connection.connected + connection.send("test") assert not writer.used -def test_send_disconnected(patch_connection, writer, events, run): +def test_send_disconnected(connection, writer, schedule, flush): ''' Nothing happens when sending after disconnecting ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - run(conn.disconnect()) - conn.send("test") + schedule(connection.connect(), connection.disconnect()) + flush() + connection.send("test") assert not writer.used -def test_send_strips(conn, writer): +def test_send_strips(connection, writer, loop): ''' Send strips whitespace from string ''' - conn.send(" a b c | @#$ d ") + loop.run_until_complete(connection.connect()) + connection.send(" a b c | @#$ d ") assert writer.used assert 
writer.has_written("a b c | @#$ d\n") -def test_read_before_connected(patch_connection, reader, events, run): +def test_read_before_connected(connection, reader, loop): ''' Nothing happens when reading before connecting ''' - conn = Connection("host", "port", events, "UTF-8", True) - value = run(conn.read()) + value = loop.run_until_complete(connection.read()) assert not value assert not reader.used -def test_read_disconnected(patch_connection, reader, events, run): +def test_read_disconnected(connection, reader, schedule, flush, loop): ''' Nothing happens when reading after disconnecting ''' - conn = Connection("host", "port", events, "UTF-8", True) - run(conn.connect()) - run(conn.disconnect()) - value = run(conn.read()) + schedule(connection.connect(), connection.disconnect()) + flush() + value = loop.run_until_complete(connection.read()) assert not value assert not reader.used -def test_read_eoferror(conn, reader, run): +def test_read_eoferror(connection, reader, loop): ''' Nothing to read ''' - value = run(conn.read()) + loop.run_until_complete(connection.connect()) + value = loop.run_until_complete(connection.read()) assert not value assert reader.used -def test_read_strips(conn, reader, run): +def test_read_strips(connection, reader, loop): ''' newline and space characters are stripped off ''' reader.push(" a b c | @#$ d \n") - value = run(conn.read()) + loop.run_until_complete(connection.connect()) + value = loop.run_until_complete(connection.read()) assert value == "a b c | @#$ d" assert reader.has_read(" a b c | @#$ d \n") -def test_run_without_message(conn, events, run): +def test_run_without_message(connection, events, loop): ''' Connection.run should connect, read empty, disconnect, return ''' - run(conn.run()) + loop.run_until_complete(connection.run()) assert events.triggered("CLIENT_CONNECT") assert events.triggered("CLIENT_DISCONNECT") -def test_run_trigger_command(conn, reader, events, eventparams, run): +def test_run_trigger_command(connection, 
reader, events, eventparams, loop): eventparams["PRIVMSG"] = ["nick", "user", "host", "target", "message"] reader.push(":nick!user@host PRIVMSG #target :this is message") received = [] @@ -133,16 +118,15 @@ def test_run_trigger_command(conn, reader, events, eventparams, run): def receive(nick, user, host, target, message): received.extend([nick, user, host, target, message]) - run(conn.run()) - + loop.run_until_complete(connection.run()) assert reader.has_read(":nick!user@host PRIVMSG #target :this is message") assert events.triggered("PRIVMSG") assert received == ["nick", "user", "host", "#target", "this is message"] -def test_run_trigger_unknown_command(conn, reader, events, run): +def test_run_trigger_unknown_command(connection, reader, events, loop): reader.push("unknown_command") - run(conn.run()) + loop.run_until_complete(connection.run()) assert reader.has_read("unknown_command") assert not events.triggered("unknown_command") diff --git a/tests/test_event.py b/tests/test_event.py index 1859eb3..5d496fe 100644 --- a/tests/test_event.py +++ b/tests/test_event.py @@ -14,14 +14,8 @@ def getparams(): @pytest.fixture -def events(getparams): - return event.EventsMixin(getparams) - - [email protected] -def run(): - loop = asyncio.get_event_loop() - return lambda coro: loop.run_until_complete(coro) +def events(getparams, loop): + return event.EventsMixin(getparams, loop=loop) @pytest.fixture @@ -43,7 +37,6 @@ def watch(): # EventsMixin.on # ============== - def test_on_subset(events): ''' register a handler with a subset of available parameters ''' for e in ["0", "1", "2"]: @@ -90,7 +83,8 @@ def test_defaults(events): def test_on_coroutine(events): ''' coroutines are fine ''' - handle = asyncio.coroutine(lambda one: None) + async def handle(one): + pass events.on("1")(handle) @@ -98,95 +92,84 @@ def test_on_coroutine(events): # EventsMixin.trigger # =================== -def test_trigger(events, run, watch): +def test_trigger(events, watch, flush): ''' trigger calls 
registered handler ''' w = watch() - # Register handler - increment call counter when called events.on("0")(lambda: w.call()) - # Trigger handler - run(events.trigger("0")) - # Make sure we called once + events.trigger("0") + flush() assert w.called -def test_trigger_multiple_calls(events, run, watch): +def test_trigger_multiple_calls(events, watch, flush): ''' trigger calls re-registered handler twice ''' w = watch() - # Register handler twice - increment call counter when called events.on("0")(lambda: w.call()) events.on("0")(lambda: w.call()) - # Trigger handler - run(events.trigger("0")) - # Make sure we called twice + events.trigger("0") + flush() assert w.calls == 2 -def test_trigger_multiple_handlers(events, run, watch): +def test_trigger_multiple_handlers(events, watch, flush): ''' trigger calls re-registered handler twice ''' w1 = watch() w2 = watch() - # Register two handlers events.on("0")(lambda: w1.call()) events.on("0")(lambda: w2.call()) - # Trigger handler - run(events.trigger("0")) - # Make sure we called each once + events.trigger("0") + flush() assert w1.calls == 1 assert w2.calls == 1 -def test_trigger_no_handlers(events, run): +def test_trigger_no_handlers(events, flush): ''' trigger an event with no handlers ''' - run(events.trigger("some event")) + events.trigger("some event") + flush() -def test_trigger_superset_params(events, run): +def test_trigger_superset_params(events, flush): ''' trigger an event with kwarg keys that aren't in event params ''' params = {} def func(one, two): params["one"] = one params["two"] = two - events.on("2")(func) - kwargs = {"one": 1, "two": 2, "unused": "value"} - run(events.trigger("2", **kwargs)) - + events.trigger("2", **kwargs) + flush() assert params["one"] == 1 assert params["two"] == 2 -def test_trigger_subset_params(events, run): +def test_trigger_subset_params(events, flush): ''' trigger an event with missing kwargs pads with None ''' params = {} def func(one, two): params["one"] = one params["two"] = 
two - events.on("2")(func) - kwargs = {"one": 1} - run(events.trigger("2", **kwargs)) - + events.trigger("2", **kwargs) + flush() assert params["one"] == 1 assert params["two"] is None -def test_trigger_subset_params_with_defaults(events, run): +def test_trigger_subset_params_with_defaults(events, flush): ''' trigger an event with missing kwargs uses function defaults ''' params = {} def func(one, two="default"): params["one"] = one params["two"] = two - events.on("2")(func) - kwargs = {"one": 1} - run(events.trigger("2", **kwargs)) - + events.trigger("2", **kwargs) + flush() assert params["one"] == 1 assert params["two"] == "default" @@ -196,7 +179,7 @@ def test_trigger_subset_params_with_defaults(events, run): # =================== -def test_bound_method_of_instance(events, run): +def test_bound_method_of_instance(events, flush): ''' verify bound methods are correctly inspected ''' params = {} @@ -207,9 +190,38 @@ def test_bound_method_of_instance(events, run): instance = Class() bound_method = instance.method events.on("2")(bound_method) - kwargs = {"one": 1} - run(events.trigger("2", **kwargs)) - + events.trigger("2", **kwargs) + flush() assert params["one"] == 1 assert params["two"] == "default" + + +# =================== +# Ordering + Blocking +# =================== + + +def test_callback_ordering(events, flush, loop): + ''' Callbacks for a second event don't queue behind the first event ''' + second_complete = asyncio.Event(loop=loop) + call_order = [] + complete_order = [] + + async def first(): + call_order.append("first") + await second_complete.wait() + complete_order.append("first") + + async def second(): + call_order.append("second") + complete_order.append("second") + second_complete.set() + + events.on("0")(first) + events.on("0")(second) + + events.trigger("0") + flush() + assert call_order == ["first", "second"] + assert complete_order == ["second", "first"] diff --git a/tests/test_plugins/test_router.py b/tests/test_plugins/test_router.py index 
eb6a434..955becb 100644 --- a/tests/test_plugins/test_router.py +++ b/tests/test_plugins/test_router.py @@ -1,42 +1,12 @@ import pytest -from bottom import Client from bottom.plugins.router import Router -class MockConnection(): - def __init__(self, *a, **kw): - pass - - -class MockClient(Client): - __conn_cls__ = MockConnection - - def __init__(self, *args, **kwargs): - self.handlers = [] - super().__init__(*args, **kwargs) - - def on(self, command): - def wrap(function): - self.handlers.append((command, function)) - return function - return wrap - - [email protected] -def client(): - return MockClient("host", "port") - - @pytest.fixture def router(client): return Router(client) -def test_init_registers_privmsg(client): - router = Router(client) - assert ("PRIVMSG", router.handle) in client.handlers - - def test_decorator_returns_original(router): def original_func(nick, target, fields): pass @@ -45,43 +15,42 @@ def test_decorator_returns_original(router): assert wrapped_func is original_func -def test_handle_no_routes(router, loop): - loop.run_until_complete( - router.handle("nick", "target", "message")) +def test_handle_no_routes(router, loop, flush): + router.handle("nick", "target", "message") + flush() -def test_handle_no_matching_route(router, loop): +def test_handle_no_matching_route(router, loop, flush): @router.route("hello, [name]") async def handle(nick, target, fields): # Should not be called assert False - loop.run_until_complete( - router.handle("nick", "target", "does not match")) + router.handle("nick", "target", "does not match") + flush() -def test_handle_with_matching_route(router, loop): +def test_handle_with_matching_route(router, loop, flush): names = [] @router.route("hello, [name]") def handle(nick, target, fields): names.append(fields['name']) - loop.run_until_complete( - router.handle("nick", "target", "hello, jack")) - loop.run_until_complete( - router.handle("nick", "target", "hello, hello, recursion")) + router.handle("nick", "target", 
"hello, jack") + router.handle("nick", "target", "hello, hello, recursion") + flush() assert ["jack", "hello, recursion"] == names -def test_back_reference(router, loop): +def test_back_reference(router, loop, flush): actual_fields = {} @router.route("<[tag]>[field]</[:ref(tag)]>") def handle(nick, target, fields): actual_fields.update(fields) - loop.run_until_complete( - router.handle("nick", "target", "<element>some value here</element>")) + router.handle("nick", "target", "<element>some value here</element>") + flush() assert {"field": "some value here", "tag": "element"} == actual_fields
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 10 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.10", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
-e git+https://github.com/numberoverzero/bottom.git@4293d2726d2a7222faa55ca509871ee03f4e66e1#egg=bottom certifi==2025.1.31 charset-normalizer==3.4.1 click==8.1.8 colorama==0.4.6 coverage==7.8.0 exceptiongroup==1.2.2 flake8==7.2.0 idna==3.10 iniconfig==2.1.0 mccabe==0.7.0 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 pytest==8.3.5 pytest-cov==6.0.0 python-dotenv==1.1.0 requests==2.32.3 simplex==1.2.55 tomli==2.2.1 urllib3==2.3.0
name: bottom channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - bzip2=1.0.8=h5eee18b_6 - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - libuuid=1.41.5=h5eee18b_0 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py310h06a4308_0 - python=3.10.16=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py310h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py310h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - charset-normalizer==3.4.1 - click==8.1.8 - colorama==0.4.6 - coverage==7.8.0 - exceptiongroup==1.2.2 - flake8==7.2.0 - idna==3.10 - iniconfig==2.1.0 - mccabe==0.7.0 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dotenv==1.1.0 - requests==2.32.3 - simplex==1.2.55 - tomli==2.2.1 - urllib3==2.3.0 prefix: /opt/conda/envs/bottom
[ "tests/test_client.py::test_default_event_loop", "tests/test_client.py::test_send_unknown_command", "tests/test_client.py::test_send_before_connected", "tests/test_client.py::test_send_after_disconnected", "tests/test_client.py::test_on", "tests/test_client.py::test_run_", "tests/test_connection.py::test_connect", "tests/test_connection.py::test_already_connected", "tests/test_connection.py::test_disconnect_before_connect", "tests/test_connection.py::test_disconnect", "tests/test_connection.py::test_already_disconnected", "tests/test_connection.py::test_send_before_connected", "tests/test_connection.py::test_send_disconnected", "tests/test_connection.py::test_send_strips", "tests/test_connection.py::test_read_before_connected", "tests/test_connection.py::test_read_disconnected", "tests/test_connection.py::test_read_eoferror", "tests/test_connection.py::test_read_strips", "tests/test_connection.py::test_run_without_message", "tests/test_connection.py::test_run_trigger_command", "tests/test_connection.py::test_run_trigger_unknown_command", "tests/test_event.py::test_on_subset", "tests/test_event.py::test_on_all", "tests/test_event.py::test_on_superset", "tests/test_event.py::test_on_ordering", "tests/test_event.py::test_with_kwargs", "tests/test_event.py::test_with_kwargs_masking", "tests/test_event.py::test_var_args", "tests/test_event.py::test_defaults", "tests/test_event.py::test_on_coroutine", "tests/test_event.py::test_trigger", "tests/test_event.py::test_trigger_multiple_calls", "tests/test_event.py::test_trigger_multiple_handlers", "tests/test_event.py::test_trigger_no_handlers", "tests/test_event.py::test_trigger_superset_params", "tests/test_event.py::test_trigger_subset_params", "tests/test_event.py::test_trigger_subset_params_with_defaults", "tests/test_event.py::test_bound_method_of_instance", "tests/test_plugins/test_router.py::test_handle_no_routes" ]
[ "tests/test_event.py::test_callback_ordering", "tests/test_plugins/test_router.py::test_decorator_returns_original", "tests/test_plugins/test_router.py::test_handle_no_matching_route", "tests/test_plugins/test_router.py::test_handle_with_matching_route", "tests/test_plugins/test_router.py::test_back_reference" ]
[]
[]
MIT License
363
cmc-python__modelmachine-20
fa9275d64498c7cbe24f02357bbb1bc971670756
2016-01-02 19:08:24
fa9275d64498c7cbe24f02357bbb1bc971670756
diff --git a/Makefile b/Makefile index ed9fdc1..f83193b 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ GENERATED = build dist *.egg-info all : test lint pep257 dist -twine : +twine : dist twine upload dist/* clean : diff --git a/README.md b/README.md index 0c15024..5460313 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,16 @@ # modelmachine Model machine emulator -[![Build Status](https://travis-ci.org/cmc-python/modelmachine.svg?branch=master)](https://travis-ci.org/cmc-python/modelmachine) +[![Build Status](https://travis-ci.org/vslutov/modelmachine.svg?branch=master)](https://travis-ci.org/vslutov/modelmachine) ## TODO -* УМ-Р (регистровая) -* УМ с модификацией адресов ??? - -* Изменить базовую концепцию memory->integer и переписать модули -* Добавить test.alu.swap +* Проверить test.alu.swap * Работа с плавающей запятой * Подумать еще о mock в тестах -* Подумать о команде остановки halt * Переделать документацию модулей * Исправить опечатки в документации * Расширить howto -* ГУИ ## Модельная машина @@ -91,8 +85,8 @@ Model machine emulator * `numeric.py` - целочисленная арифметика с фиксированным числом двоичных знаков * `alu.py` - арифметико-логическое устройство, работает с четко - специализированными регистрами: `R1`, `R2`, `S`, `FLAGS` и `IP`. -* `cu.py` *в процессе реализации* - контролирующее устройство, выполняющее + специализированными регистрами: `R1`, `R2`, `S`, `FLAGS` и `PC`. +* `cu.py` - контролирующее устройство, выполняющее считывание команд из памяти и запускающее необходимые методы в арифметико-логическом устройстве * `io.py` - устройство ввода-вывода @@ -131,7 +125,7 @@ Model machine emulator * `R1`, `R2`, `S` для арифметических операций. * `FLAGS` для хранения флагов состояния. -* `IP` *только* для пересылки туда адреса из регистра `R1` при условных +* `PC` *только* для пересылки туда адреса из регистра `R1` при условных переходах. Схема работы: @@ -148,7 +142,7 @@ Model machine emulator остатком. `S := R1 / R2; R1 := R1 % R2`. 
* Команда пересылки `move`: `S := R1`. * Команды безусловного перехода `jump` и условного перехода `cond_jump` - работают по схеме `IP := R1`, режим работы `cond_jump` зависит от того, + работают по схеме `PC := R1`, режим работы `cond_jump` зависит от того, какие дополнительные аргументы будут переданы. * Команда останова `halt` просто выставляет флаг остановки HALT в регистре флагов @@ -163,8 +157,8 @@ Model machine emulator 1. `fetch_and_decode` - загрузка и расшифровка очередной команды. Содержимое ячейки оперативной памяти с адресом записанным - в регистре `IP` загружается в регистр `IR`, затем из него извлекается - код операции и адреса операндов, затем счетчик `IP` увеличивается на + в регистре `PC` загружается в регистр `RI`, затем из него извлекается + код операции и адреса операндов, затем счетчик `PC` увеличивается на длину только что считанной команды. 2. `load` - данные по только что считанным адресам загружаются в регистры процессора `R1` и `R2` @@ -225,9 +219,9 @@ Model machine emulator ### Таблица команд модельных машин -|OPCODE|mm-3 |mm-2 |mm-v |mm-1 |mm-st | +|OPCODE|mm-3 |mm-2 |mm-v |mm-1 |mm-m | |:-----|:---:|:---:|:---:|:---:|:----:| -|0x00 |move |move |move |load | | +|0x00 |move |move |move |load | load | |0x01 | add | add | add | add | add | |0x02 | sub | sub | sub | sub | sub | |0x03 |smul |smul |smul |smul | smul | @@ -235,12 +229,15 @@ Model machine emulator |0x05 | |comp |comp |comp | comp | |0x13 |umul |umul |umul |umul | umul | |0x14 |udiv |udiv |udiv |udiv | udiv | -|0x10 | | | |store| | -|0x20 | | | |swap | | -|0x5A | | | | |stpush| -|0x5B | | | | |stpop | -|0x5C | | | | |stdup | -|0x5D | | | | |stswap| +|0x10 | | | |store|store | +|0x20 | | | |swap | move | +|0x21 | | | | | radd | +|0x22 | | | | | rsub | +|0x23 | | | | |rsmul | +|0x24 | | | | |rsdiv | +|0x25 | | | | |rcomp | +|0x33 | | | | |rumul | +|0x34 | | | | |rudiv | |0x80 |jump |jump |jump |jump | jump | |0x81 | jeq | jeq | jeq | jeq | jeq | |0x82 |jneq |jneq |jneq |jneq | 
jneq | @@ -273,7 +270,7 @@ Model machine emulator |ujleq | <= u |unsigned jump if less or equal | |ujg | > u |unsigned jump if greater | -### mm3 +### mm-3 Архитектура трехадресной модельной машины. @@ -281,15 +278,15 @@ Model machine emulator * Размер адреса: 2 байта. * Арифметические вычисления производятся с одной ячейкой оперативной памяти. * Код команды помещается в одну ячейку оперативной памяти `КОП А1 А2 А3`. -* Регистры: `S`, `R1`, `R2`, `FLAGS`, `IP`, `IR`, `ADDR`. +* Регистры: `S`, `R1`, `R2`, `FLAGS`, `PC`, `RI`, `ADDR`. Назначение регистров: * `S` - регистр сумматор, в него записывается результат арифметической операции. * `R1`, `R2` - регистры операндов арифметических операций. * `FLAGS` - регистр флагов. -* `IP` - регистр указатель инструкции. -* `IR` - регистр для хранения инструкции. +* `PC` - регистр указатель инструкции. +* `RI` - регистр для хранения инструкции. * `ADDR` - регистр для хранения адреса для инструкции перехода. Действия процессора для арифметических инструкций (`add`, `sub`, @@ -306,12 +303,12 @@ Model machine emulator два результата: частное – в ячейку с адресом `А3`, остаток – в следующую ячейку, по адресу `(А3+1) mod 16^4`. -* `jump A1 A2 A3`: `IP := A3` +* `jump A1 A2 A3`: `PC := A3` * Условные переходы: сравниваются `R1` и `R2`, в зависимости от результата - происходит `IP := A3`. + происходит `PC := A3`. * Команда пересылки `move`: [A3] := R1. -### mm2 +### mm-2 Архитектура двухадресной модельной машины. @@ -319,7 +316,7 @@ Model machine emulator * Размер адреса: 2 байта. * Арифметические вычисления производятся с одной ячейкой оперативной памяти. * Код команды помещается в одну ячейку оперативной памяти `КОП А1 А2`. -* Регистры: `R1`, `R2`, `FLAGS`, `IP`, `IR`, `ADDR`. +* Регистры: `R1`, `R2`, `FLAGS`, `PC`, `RI`, `ADDR`. Действия для арифметических команд `add`, `sub`, `smul`, `sdiv`, `umul`, `udiv`: @@ -333,12 +330,12 @@ Model machine emulator 1. `R1 := [A1], R2 := [A2]` 2. 
Запустить в АЛУ схему `sub`, выставить регистр `FLAGS` -* `jump A1 A2`: `IP := A2` +* `jump A1 A2`: `PC := A2` * Условные переходы делаются исходя из регистра `FLAGS` * `move A1 A2`: `[A1] := [A2]` * Команда останова `halt` взводит флаг `HALT` в регистре `FLAGS` -### mmv +### mm-v Архитектура модельной машины с переменным (variable) фарматом команд. @@ -348,7 +345,7 @@ Model machine emulator памяти. * Код команды занимает разное количество ячеек в зависимости от выполняемой операции. -* Регистры: `R1`, `R2`, `FLAGS`, `IP`, `IR`, `ADDR`. +* Регистры: `R1`, `R2`, `FLAGS`, `PC`, `RI`, `ADDR`. Таблица кодов команд: @@ -387,12 +384,12 @@ Model machine emulator 1. `R1 := [A1], R2 := [A2]` 2. Запустить в АЛУ схему `sub`, выставить регистр `FLAGS` -* `jump A1`: `IP := A1` +* `jump A1`: `PC := A1` * Условные переходы делаются исходя из регистра `FLAGS` * `move A1 A2`: `[A1] := [A2]` * Команда останова `halt` взводит флаг `HALT` в регистре `FLAGS` -### mm1 +### mm-1 Архитектура одноадресной модельной машины. @@ -400,7 +397,7 @@ Model machine emulator * Размер адреса: 2 байта. * Арифметические вычисления производятся с одной ячейкой оперативной памяти. * Код команды помещается в одну ячейку оперативной памяти `КОП А`. -* Регистры: `S`, `R`, `S1`, `FLAGS`, `IP`, `IR`. +* Регистры: `S`, `R`, `S1`, `FLAGS`, `PC`, `RI`. Регистры `S` и `S1` хранят информацию постоянно, а не затираются при выполнении очередной команды, как было раньше. В регистр `R` закгружается @@ -422,108 +419,123 @@ Model machine emulator 1. `R := [A]` 2. Запустить в АЛУ схему `sub`, выставить регистр `FLAGS` -* `jump A`: `IP := A` +* `jump A`: `PC := A` * Условные переходы делаются исходя из регистра `FLAGS` * `load A`: `S := [A]` * `store A`: `[A] := S` * `swap`: `S, S1 := S1, S` * Команда останова `halt` взводит флаг `HALT` в регистре `FLAGS` -### mmst +### mm-m -Архитектура стековой (stack) модельной машины. +Архитектура модельной машины с модификацией адресов (modification). 
-* Размер ячейки оперативной памяти: 1 байт. +* Размер ячейки оперативной памяти: 2 байта. * Размер адреса: 2 байта. -* Арифметические вычисления производятся со словом в 3 байта. +* Арифметические вычисления производятся со словом в 4 байта. * Код команды занимает разное количество ячеек в зависимости от выполняемой - операции. Большинство команд безадресные, имеют формат `КОП` и занимают - 1 байт. Некоторые команды работы со стеком и команды перехода имеют - один операнд, формат `КОП А` и занимают 3 байта. -* Регистры: `R1`, `R2`, `FLAGS`, `IP`, `IR`, `SP`. - -Регистр `SP` - указатель стека (stack pointer) указывает на вершину стека. + операции. Арифметические команды имеют формы регистр-регистр и регистр-память. + Команды регистр-регистр имеют формат `КОП RA1 RA2` и занимают 2 байта. + Команды регистр-память имеют формат `КОП R M A` и занимают 4 байта. + Команды перехода имеют формат `КОП 0 0 A` и занимают 4 байта. +* Регистры: `R0-RF`, `S`, `RZ`, `FLAGS`, `PC`, `RI`. + +Основное отличие этой машины от предыдущих - наличие адресуемых регистров +общего назначения `R0-RF`, используемых для арифметических +вычислений и адресации памяти. `S`, `RZ` - неадресуемые регистры для работы +АЛУ. + +Также адресация данных теперь производится таким алгоритмом: + +1. Возьмем содержимое адресуемого регистра с номером `M` (от 0x0 до 0xF): `[M]`. + Если номер регистра `M` равен нулю, значение `[M]` также равно нулю вне + зависимости от содержимого регистра `R0`. +2. Добавим к нему адрес `A` (от 0x0000 до 0xFFFF): `[M] + A`. +3. Возьмем остаток от деления этого адреса на 2^16: `([M] + A) % 2^16`. +4. Возьмем из ОЗУ данные по полученному адресу: `[[M] + A]`. 
Таблица кодов команд: -|Код команды|Мнемоник|Формат |Длина (в байтах)| -|:----------|:------:|:--------|---------------:| -|0x01 |add |add | 1| -|0x02 |sub |sub | 1| -|0x03 |smul |smul | 1| -|0x04 |sdiv |sdiv | 1| -|0x05 |comp |comp | 1| -|0x13 |umul |umul | 1| -|0x14 |udiv |udiv | 1| -|0x5A |stpush |stpush A | 3| -|0x5B |stpop |stpop A | 3| -|0x5C |stdup |stdup | 1| -|0x5D |stswap |stswap | 1| -|0x80 |jump |jump A | 3| -|0x81 |jeq |jeq A | 3| -|0x82 |jneq |jneq A | 3| -|0x83 |sjl |sjl A | 3| -|0x84 |sjgeq |sjgeq A | 3| -|0x85 |sjleq |sjleq A | 3| -|0x86 |sjg |sjg A | 3| -|0x93 |ujl |ujl A | 3| -|0x94 |ujgeq |ujgeq A | 3| -|0x95 |ujleq |ujleq A | 3| -|0x96 |ujg |ujg A | 3| -|0x99 |halt |halt | 1| - -Как действует метод `push(value)`: - -1. `SP -= value_size` -2. `[SP] := value` - -Как действует метод `pop()`: - -1. `value := [SP]` -2. `SP += value_size` -2. `return value` - -Принцип работы стековых команд: - -В стек `stpush A`: - -1. `R1 := [A]` -2. `push(R1)` - -Из стека `stpop A`: - -1. `R1 := pop()` -2. `[A] := R1` - -Дублирование `stdup`: - -1. `R1 := pop()` -2. `push(R1); push(R1)` - -Обмен `stswap`: - -1. `R1 := pop(); R2 := pop()` -2. `push(R1); push(R2)` - -Действия для арифметических команд (исключая деление) `add`, `sub`, `smul`, -`umul`: - -1. `R2 := pop(); R1 := pop()` -2. `R1 := R1 op R2` -3. `push(R1)` - -Действия для команд деления `sdivmod` и `udivmod`: - -1. `R2 := pop(); R1 := pop()` -2. `R1, R2 := R1 / R2, R1 % R2` -3. `push(R1); push(R2)` - -Действия для команды сравнения `cmp`: - -1. `R2 := pop(), R1 := pop()` -2. 
Запустить в АЛУ схему `sub`, выставить регистр `FLAGS` - -* `jump A1`: `IP := A1` +|Код команды|Мнемоник|Формат |Длина (в байтах)| +|:----------|:------:|:----------|---------------:| +|0x00 |load |load R M A | 4| +|0x01 |add |add R M A | 4| +|0x02 |sub |sub R M A | 4| +|0x03 |smul |smul R M A | 4| +|0x04 |sdiv |sdiv R M A | 4| +|0x05 |comp |comp R M A | 4| +|0x13 |umul |umul R M A | 4| +|0x14 |udiv |udiv R M A | 4| +|0x10 |store |store R M A| 4| +|0x20 |rmove |rmove RX RY| 2| +|0x21 |radd |radd RX RY | 2| +|0x22 |rsub |rsub RX RY | 2| +|0x23 |rsmul |rsmul RX RY| 2| +|0x24 |rsdiv |rsdiv RX RY| 2| +|0x25 |rcomp |rcomp RX RY| 2| +|0x33 |rumul |rumul RX RY| 2| +|0x34 |rudiv |rudiv RX RY| 2| +|0x80 |jump |jump 0 M A | 4| +|0x81 |jeq |jeq 0 M A | 4| +|0x82 |jneq |jneq 0 M A | 4| +|0x83 |sjl |sjl 0 M A | 4| +|0x84 |sjgeq |sjgeq 0 M A| 4| +|0x85 |sjleq |sjleq 0 M A| 4| +|0x86 |sjg |sjg 0 M A | 4| +|0x93 |ujl |ujl 0 M A | 4| +|0x94 |ujgeq |ujgeq 0 M A| 4| +|0x95 |ujleq |ujleq 0 M A| 4| +|0x96 |ujg |ujg 0 M A | 4| +|0x99 |halt |halt 00 | 2| + +Действия для арифметических команд регистр-память (исключая деление) `add`, +`sub`, `smul`, `umul` (формат `op R M A`): + +1. `S, RZ := R, [[M] + A]` +2. `S := S op RZ` +3. `R := S` + +Действия для команд деления регистр-память `sdivmod` и `udivmod` +(формат `op R M A`, `R_next` - регистр, следующий за регистром `R`): + +1. `S, RZ := S, [[M] + A]` +2. `S, RZ := S / RZ, S % RZ` +3. `R, R_next := S, RZ` + +Действия для команды сравнения `comp R M A`: + +1. `S, RZ := S, [[M] + A]` +2. Запустить в АЛУ схему `sub S RZ`, выставить регистр `FLAGS` + +Действия для команды загрузки `load R M A`: + +1. `R := [[M] + A]` + +Действия для команды выгрузки `store R M A`: + +1. `[[M] + A] := R` + +Действия для арифметических команд регистр-регистр (исключая деление) `radd`, +`rsub`, `rsmul`, `rumul` (формат `op RX RY`): + +1. `S, RZ := RX, RY` +2. `S := S op RZ` +3. 
`RX := S` + +Действия для команд деления регистр-регистр `rsdiv` и `rudiv` +(формат - `op RX RY`; `RX_next` - регистр, следующий за регистром `RX`, если +`RX = RF`, то `RX_next = R0`): + +1. `S, RZ := RX, RY` +2. `S, RZ := S / RZ, S % RZ` +3. `RX, RX_next := S, RZ` + +Действия для команды сравнения `rcomp RX RY`: + +1. `S, RZ := RX, RY` +2. Запустить в АЛУ схему `sub S RZ`, выставить регистр `FLAGS` + +* `jump 00 A`: `PC := A` * Условные переходы делаются исходя из регистра `FLAGS` * Команда останова `halt` взводит флаг `HALT` в регистре `FLAGS` diff --git a/modelmachine/__main__.py b/modelmachine/__main__.py index 852943e..ffaaf6a 100644 --- a/modelmachine/__main__.py +++ b/modelmachine/__main__.py @@ -5,7 +5,7 @@ from modelmachine.ide import get_program, get_cpu, debug import pytest, os, sys, argparse -VERSION = "0.0.6" # Don't forget fix in setup.py +VERSION = "0.1.0" # Don't forget fix in setup.py def run_program(args): cpu = get_program(args.filename, args.protect_memory) @@ -32,7 +32,7 @@ def main(argv, stdout): help='print version and exit') parser.add_argument('-m', '--protect_memory', action='store_true', default=False, - help='raise an error if try to read dirty memory') + help='raise an error, if program tries read dirty memory') subparsers = parser.add_subparsers(title='commands', help='commands for model machine emulator') diff --git a/modelmachine/cpu.py b/modelmachine/cpu.py index 6eaec7e..6be0540 100644 --- a/modelmachine/cpu.py +++ b/modelmachine/cpu.py @@ -16,7 +16,7 @@ from modelmachine.cu import ControlUnit3 as BCU3 from modelmachine.cu import ControlUnit2 as BCU2 from modelmachine.cu import ControlUnitV as BCUV from modelmachine.cu import ControlUnit1 as BCU1 -from modelmachine.cu import ControlUnitS as BCUS +from modelmachine.cu import ControlUnitM as BCUM from modelmachine.alu import ArithmeticLogicUnit from modelmachine.io import InputOutputUnit @@ -107,7 +107,7 @@ class CPUMM3(AbstractCPU): register_names=self.register_names, 
operand_size=word_size, address_size=address_size) - self.control_unit = BCU3(instruction_size=word_size, + self.control_unit = BCU3(ir_size=word_size, registers=self.registers, ram=self.ram, alu=self.alu, @@ -136,7 +136,7 @@ class CPUMM2(AbstractCPU): register_names=self.register_names, operand_size=word_size, address_size=address_size) - self.control_unit = BCU2(instruction_size=word_size, + self.control_unit = BCU2(ir_size=word_size, registers=self.registers, ram=self.ram, alu=self.alu, @@ -195,7 +195,7 @@ class CPUMM1(AbstractCPU): register_names=self.register_names, operand_size=word_size, address_size=address_size) - self.control_unit = BCU1(instruction_size=word_size, + self.control_unit = BCU1(ir_size=word_size, registers=self.registers, ram=self.ram, alu=self.alu, @@ -206,39 +206,39 @@ class CPUMM1(AbstractCPU): word_size=word_size) -class CPUMMS(AbstractCPU): +class CPUMMM(AbstractCPU): - """CPU stack model machine.""" + """CPU address modification model machine.""" def __init__(self, protect_memory): """See help(type(x)).""" byte_size = 8 - word_size = 3 * byte_size - address_size = 2 * byte_size + address_size = word_size = 2 * byte_size + operand_size = ir_size = 4 * byte_size memory_size = 2 ** address_size - self.ram = RandomAccessMemory(word_size=byte_size, + self.ram = RandomAccessMemory(word_size=word_size, memory_size=memory_size, endianess='big', # Unused is_protected=protect_memory) self.registers = RegisterMemory() - self.register_names = BCUS.register_names + self.register_names = BCUM.register_names self.alu = ArithmeticLogicUnit(registers=self.registers, register_names=self.register_names, - operand_size=word_size, + operand_size=operand_size, address_size=address_size) - self.control_unit = BCUS(ir_size=word_size, + self.control_unit = BCUM(ir_size=ir_size, registers=self.registers, ram=self.ram, alu=self.alu, - operand_size=word_size, + operand_size=operand_size, address_size=address_size) self.io_unit = InputOutputUnit(ram=self.ram, 
start_address=0, - word_size=word_size) + word_size=operand_size) CPU_LIST = {'mm3': CPUMM3, 'mm2': CPUMM2, 'mmv': CPUMMV, 'mm1': CPUMM1, - 'mms': CPUMMS} + 'mmm': CPUMMM} diff --git a/modelmachine/cu.py b/modelmachine/cu.py index 33ae4dc..86ee0c7 100644 --- a/modelmachine/cu.py +++ b/modelmachine/cu.py @@ -3,6 +3,7 @@ """Control unit parse instruction and give the commands to another part of computer.""" from modelmachine.alu import HALT, LESS, GREATER, EQUAL +from modelmachine.numeric import Integer RUNNING = 1 HALTED = 2 @@ -80,6 +81,15 @@ class ControlUnit(AbstractControlUnit): "udivmod": 0x14, "swap": 0x20, + "rmove": 0x20, + + "radd": 0x21, + "rsub": 0x22, + "rsmul": 0x23, + "rsdivmod": 0x24, + "rcomp": 0x25, + "rumul": 0x33, + "rudivmod": 0x34, "stpush": 0x5A, "stpop": 0x5B, @@ -119,7 +129,6 @@ class ControlUnit(AbstractControlUnit): OPCODES["stdup"], OPCODES["stswap"]} BINAR_OPCODES = ARITHMETIC_OPCODES | {OPCODES["comp"]} - MONAR_OPCODES = {OPCODES["halt"]} register_names = {"PC": "PC", "ADDR": "ADDR", "RI": "RI"} opcode = 0 @@ -231,10 +240,11 @@ class ControlUnit3(ControlUnit): "R1": "R1", "R2": "R2", "S": "S", "RES": "R1", "FLAGS": "FLAGS"} - def __init__(self, instruction_size, *vargs, **kvargs): + def __init__(self, ir_size, *vargs, **kvargs): """See help(type(x)).""" - super().__init__(instruction_size, *vargs, **kvargs) - self.instruction_size = instruction_size + super().__init__(ir_size, *vargs, **kvargs) + + self.instruction_size = ir_size self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES | {self.OPCODES["move"], self.OPCODES["halt"]}) @@ -309,12 +319,11 @@ class ControlUnit2(ControlUnit): "R1": "R1", "R2": "R2", "S": "R1", "RES": "R2", "FLAGS": "FLAGS"} - def __init__(self, instruction_size, *vargs, **kvargs): + def __init__(self, ir_size, *vargs, **kvargs): """See help(type(x)).""" - super().__init__(instruction_size, *vargs, **kvargs) - self.instruction_size = instruction_size + super().__init__(ir_size, *vargs, **kvargs) - 
self.instruction_size = instruction_size + self.instruction_size = ir_size self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES | {self.OPCODES["move"], self.OPCODES["halt"], @@ -473,12 +482,11 @@ class ControlUnit1(ControlUnit): "R1": "S", "R2": "R", "S": "S", "RES": "S1", "FLAGS": "FLAGS"} - def __init__(self, instruction_size, *vargs, **kvargs): + def __init__(self, ir_size, *vargs, **kvargs): """See help(type(x)).""" - super().__init__(instruction_size, *vargs, **kvargs) - self.instruction_size = instruction_size + super().__init__(ir_size, *vargs, **kvargs) - self.instruction_size = instruction_size + self.instruction_size = ir_size self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES | {self.OPCODES["load"], self.OPCODES["store"], @@ -537,137 +545,153 @@ class ControlUnit1(ControlUnit): self.operand_size) self.ram.put(self.address, value, self.operand_size) -class ControlUnitS(ControlUnit): +class ControlUnitM(ControlUnit): - """Control unit for stack model machine.""" + """Control unit for address modification model machine.""" - address = None + address = 0 + register1 = '' + register2 = '' - register_names = {"PC": "PC", "ADDR": "ADDR", "RI": "RI", "SP": "SP", - "R1": "R1", "R2": "R2", "S": "R1", "RES": "R2", + register_names = {"PC": "PC", "ADDR": "ADDR", "RI": "RI", + "R1": "S", "R2": "RZ", "S": "S", "RES": "RZ", "FLAGS": "FLAGS"} + + REGISTER_OPCODES = {ControlUnit.OPCODES["radd"], + ControlUnit.OPCODES["rsub"], + ControlUnit.OPCODES["rsmul"], + ControlUnit.OPCODES["rsdivmod"], + ControlUnit.OPCODES["rumul"], + ControlUnit.OPCODES["rudivmod"], + ControlUnit.OPCODES["rmove"], + ControlUnit.OPCODES["rcomp"]} + + ARITHMETIC_OPCODES = (ControlUnit.ARITHMETIC_OPCODES + | {ControlUnit.OPCODES["radd"], + ControlUnit.OPCODES["rsub"], + ControlUnit.OPCODES["rsmul"], + ControlUnit.OPCODES["rsdivmod"], + ControlUnit.OPCODES["rumul"], + ControlUnit.OPCODES["rudivmod"]}) + def __init__(self, ir_size, *vargs, **kvargs): """See help(type(x)).""" # 
dynamic instruction size super().__init__(ir_size, *vargs, **kvargs) - self.opcodes = (self.ARITHMETIC_OPCODES | self.JUMP_OPCODES | - {self.OPCODES["stpush"], self.OPCODES["stpop"], - self.OPCODES["stdup"], self.OPCODES["stswap"], - self.OPCODES["halt"], - self.OPCODES["comp"]}) + self.reg_addr_size = 4 - for reg in {"R1", "R2", "FLAGS"}: + self.opcodes = (self.ARITHMETIC_OPCODES + | self.JUMP_OPCODES + | self.REGISTER_OPCODES + | {self.OPCODES["load"], + self.OPCODES["store"], + self.OPCODES["halt"], + self.OPCODES["comp"]}) + + for reg in {"S", "RZ", "FLAGS", "R0", "R1", "R2", "R3", "R4", + "R5", "R6", "R7", "R8", "R9", "RA", "RB", "RC", + "RD", "RE", "RF"}: self.registers.add_register(reg, self.operand_size) - for reg in {"SP"}: - self.registers.add_register(reg, self.address_size) - self.registers.put("SP", 0, self.address_size) - - def push(self, value): - """Push value to stack.""" - stack_pointer = self.registers.fetch(self.register_names["SP"], - self.address_size) - stack_pointer -= self.operand_size // self.ram.word_size - stack_pointer %= self.ram.memory_size - self.registers.put(self.register_names["SP"], - stack_pointer, - self.address_size) - self.ram.put(stack_pointer, value, self.operand_size) - - def pop(self): - """Pop value from the stack.""" - stack_pointer = self.registers.fetch(self.register_names["SP"], - self.address_size) - value = self.ram.fetch(stack_pointer, self.operand_size) - stack_pointer += self.operand_size // self.ram.word_size - stack_pointer %= self.ram.memory_size - self.registers.put(self.register_names["SP"], - stack_pointer, - self.address_size) - return value def fetch_and_decode(self): """Fetch 3 addresses.""" - mask = 2 ** self.address_size - 1 - one_operand = self.JUMP_OPCODES | {self.OPCODES["stpush"], - self.OPCODES["stpop"]} + addr_mask = 2 ** self.address_size - 1 + reg_mask = 2 ** self.reg_addr_size - 1 instruction_pointer = self.registers.fetch(self.register_names["PC"], self.address_size) - self.opcode = 
self.ram.fetch(instruction_pointer, self.OPCODE_SIZE) - if self.opcode in one_operand: - instruction_size = self.OPCODE_SIZE + self.address_size + batch_size = max(self.ram.word_size, self.OPCODE_SIZE) + self.opcode = self.ram.fetch(instruction_pointer, batch_size) + space_size = batch_size - self.OPCODE_SIZE + self.opcode = Integer(self.opcode, batch_size, False)[space_size:].get_value() + + if self.opcode in self.opcodes - (self.REGISTER_OPCODES | {self.OPCODES["halt"]}): + instruction_size = self.OPCODE_SIZE + 2 * self.reg_addr_size + self.address_size else: - instruction_size = self.OPCODE_SIZE + instruction_size = self.OPCODE_SIZE + 2 * self.reg_addr_size instruction = self.fetch_instruction(instruction_size) - if self.opcode in one_operand: - self.address = instruction & mask + if self.opcode in self.REGISTER_OPCODES: + r_x = (instruction >> self.reg_addr_size) & reg_mask + self.register1 = "R" + hex(r_x).upper()[2:] + + r_y = instruction & reg_mask + self.register2 = "R" + hex(r_y).upper()[2:] + elif self.opcode != self.OPCODES["halt"]: + r_x = (instruction >> (self.reg_addr_size + self.address_size)) & reg_mask + self.register1 = "R" + hex(r_x).upper()[2:] + + modificator = "R" + hex((instruction >> self.address_size) & reg_mask).upper()[2:] + if modificator != "R0": + modificator = self.registers.fetch(modificator, self.operand_size) + else: + modificator = 0 + self.address = (instruction + modificator) & addr_mask def load(self): """Load registers R1 and R2.""" - if self.opcode in self.BINAR_OPCODES | {self.OPCODES["stswap"]}: - operand2 = self.pop() - self.registers.put(self.register_names["R2"], - operand2, - self.operand_size) - operand1 = self.pop() + if self.opcode == self.OPCODES["store"]: + operand1 = self.registers.fetch(self.register1, self.operand_size) self.registers.put(self.register_names["R1"], operand1, self.operand_size) - - elif self.opcode == self.OPCODES["stpush"]: - operand = self.ram.fetch(self.address, self.operand_size) + elif 
self.opcode in self.REGISTER_OPCODES: + operand1 = self.registers.fetch(self.register1, self.operand_size) self.registers.put(self.register_names["R1"], - operand, + operand1, self.operand_size) - elif self.opcode in {self.OPCODES["stdup"], self.OPCODES["stpop"]}: - operand = self.pop() + operand2 = self.registers.fetch(self.register2, self.operand_size) + self.registers.put(self.register_names["R2"], + operand2, + self.operand_size) + elif self.opcode in (self.ARITHMETIC_OPCODES | + {self.OPCODES["comp"], self.OPCODES["load"]}): + operand1 = self.registers.fetch(self.register1, self.operand_size) self.registers.put(self.register_names["R1"], - operand, + operand1, + self.operand_size) + operand2 = self.ram.fetch(self.address, self.operand_size) + self.registers.put(self.register_names["R2"], + operand2, self.operand_size) elif self.opcode in self.JUMP_OPCODES: self.registers.put(self.register_names["ADDR"], self.address, self.address_size) + if self.opcode in self.REGISTER_OPCODES: + self.opcode ^= 0x20 + def execute(self): """Add specific commands: conditional jumps and cmp.""" if self.opcode == self.OPCODES["comp"]: self.alu.sub() + elif self.opcode == self.OPCODES["load"]: + self.alu.move("R2", "S") + elif self.opcode == self.OPCODES["store"]: + self.alu.move("R1", "S") elif self.opcode in self.JUMP_OPCODES: self.execute_jump() - elif self.opcode == self.OPCODES["stswap"]: - self.alu.swap() - elif self.opcode == self.OPCODES["stdup"]: - self.alu.move(source="R1", dest="R2") - elif self.opcode in self.STACK_OPCODES: - pass - elif self.opcode == self.OPCODES["move"]: - raise ValueError('Invalid opcode `{opcode}`' - .format(opcode=hex(self.opcode))) else: super().execute() def write_back(self): """Write result back.""" - if self.opcode in self.ARITHMETIC_OPCODES | {self.OPCODES["stpush"], - self.OPCODES["stswap"], - self.OPCODES["stdup"]}: + if self.opcode in self.ARITHMETIC_OPCODES | {self.OPCODES["load"]}: value = self.registers.fetch(self.register_names["S"], 
self.operand_size) - self.push(value) - if self.opcode in self.DIVMOD_OPCODES | {self.OPCODES["stswap"], - self.OPCODES["stdup"]}: + self.registers.put(self.register1, value, self.operand_size) + if self.opcode in self.DIVMOD_OPCODES: + next_register = (int(self.register1[1:], 0x10) + 1) % 0x10 + next_register = "R" + hex(next_register).upper()[2:] value = self.registers.fetch(self.register_names["RES"], self.operand_size) - self.push(value) - elif self.opcode == self.OPCODES["stpop"]: - value = self.registers.fetch(self.register_names["R1"], + self.registers.put(next_register, value, self.operand_size) + elif self.opcode == self.OPCODES["store"]: + value = self.registers.fetch(self.register_names["S"], self.operand_size) self.ram.put(self.address, value, self.operand_size) - - diff --git a/modelmachine/ide.py b/modelmachine/ide.py index a10f040..a3fddad 100644 --- a/modelmachine/ide.py +++ b/modelmachine/ide.py @@ -136,6 +136,7 @@ def exec_continue(cpu, step): def exec_print(cpu, step): """Print contents of registers.""" + print("RAM access count:", cpu.ram.access_count) print("Register states:") registers = {cpu.register_names[name] for name in cpu.register_names} for reg in sorted(list(registers)): diff --git a/modelmachine/io.py b/modelmachine/io.py index 3328907..0367def 100644 --- a/modelmachine/io.py +++ b/modelmachine/io.py @@ -44,11 +44,11 @@ class InputOutputUnit: def store_hex(self, start, size): """Save data to string.""" - if size % self.word_size != 0: + if size % self.ram.word_size != 0: raise KeyError('Cannot save {size} bits, word size is {word_size}' - .format(size=size, word_size=self.word_size)) + .format(size=size, word_size=self.ram.word_size)) result = [] - block_size = self.word_size + block_size = self.ram.word_size size //= block_size for i in range(start, start + size): data = self.ram.fetch(i, block_size) diff --git a/modelmachine/memory.py b/modelmachine/memory.py index c0ec03a..4fe3c7d 100644 --- a/modelmachine/memory.py +++ 
b/modelmachine/memory.py @@ -58,6 +58,7 @@ class AbstractMemory(dict): super().__init__(addresses) self.word_size = word_size + self.access_count = 0 if endianess == "big": self.decode, self.encode = big_endian_decode, big_endian_encode @@ -106,6 +107,8 @@ class AbstractMemory(dict): self.check_address(address) self.check_bits_count(address, bits) + self.access_count += 1 + size = bits // self.word_size if size == 1: # Address not always is integer, sometimes string return self[address] @@ -123,6 +126,8 @@ class AbstractMemory(dict): enc_value = self.encode(value, self.word_size, bits) + self.access_count += 1 + size = bits // self.word_size if size == 1: # Address not always is integer, sometimes string self[address] = value diff --git a/modelmachine/numeric.py b/modelmachine/numeric.py index 62058b1..54c2c7b 100644 --- a/modelmachine/numeric.py +++ b/modelmachine/numeric.py @@ -87,3 +87,20 @@ class Integer(Number): """Test if two integer is equal.""" self.check_compability(other) return self.get_value() == other.get_value() + + def __getitem__(self, key): + """Get bits of unsigned representation. + + Zero-indexed bit is minor. 
+ """ + representation = [(self.value >> i) & 1 for i in range(self.size)] + representation = representation[key] + if isinstance(representation, int): + return Integer(representation, 1, False) + elif isinstance(representation, list): + value = 0 + for i in range(len(representation)): + value += representation[i] << i + return Integer(value, len(representation), False) + else: + raise TypeError("Integer indeces must be integers") diff --git a/samples/mmm_sample.mmach b/samples/mmm_sample.mmach new file mode 100644 index 0000000..26516ea --- /dev/null +++ b/samples/mmm_sample.mmach @@ -0,0 +1,21 @@ +mmm + +[config] +input = 0x100,0x102 +output = 0x104 + +[code] +; x = ((a * -21) % 50 - b) ** 2 == 178929 +00 0 0 0100 ; R0 := a +03 0 0 000C ; R0 := a * -21 +04 0 0 000E ; R0 := (a * -21) / 50, R1 := x = (a * -21) % 50 +02 1 0 0102 ; R1 := x - b +23 1 1 ; R1 := (x - b) ** 2 +10 1 0 0104 ; [0104] := R1 +99 0 0 ; halt +; --------------------- +FFFFFFEB ; -21 +00000032 ; 50 + +[input] +-123 456 diff --git a/setup.py b/setup.py index 069ec0b..abf27a9 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,7 @@ Read the doc: <https://github.com/vslutov/modelmachine> from setuptools import setup, find_packages -VERSION = "0.0.6" # Don't forget fix in __main__.py +VERSION = "0.1.0" # Don't forget fix in __main__.py setup(name='modelmachine', version=VERSION,
Регистровая машина
cmc-python/modelmachine
diff --git a/modelmachine/tests/test_cpu.py b/modelmachine/tests/test_cpu.py index 6976f72..b6cee81 100644 --- a/modelmachine/tests/test_cpu.py +++ b/modelmachine/tests/test_cpu.py @@ -5,7 +5,7 @@ from modelmachine.cpu import AbstractCPU from modelmachine.cpu import CPUMM3, CPUMM2 from modelmachine.cpu import CPUMMV, CPUMM1 -from modelmachine.cpu import CPUMMS +from modelmachine.cpu import CPUMMM from modelmachine.memory import RandomAccessMemory, RegisterMemory @@ -236,35 +236,30 @@ class TestCPUMM1: assert out.read() == "298\n" +class TestCPUMMM: -class TestCPUMMS: - - """Smoke test for mm-s.""" + """Smoke test for mm-m.""" cpu = None source = None def setup(self): """Init state.""" - self.cpu = CPUMMS(protect_memory=False) + self.cpu = CPUMMM(protect_memory=False) self.source = ("[config]\n" + - "input=0x100,0x103\n" + - "output=0x106\n" + + "input=0x100,0x102\n" + + "output=0x104\n" + "[code]\n" + - "5A 0100\n" + - "5A 0103\n" + - "01\n" + - "5C\n" + - "5A 0103\n" + - "05\n" + - "86 0011\n" + - "5C\n" + - "02 ; never be used\n" + - "5A 001b\n" + - "02\n" + - "5B 0106\n" + - "99 0000\n" + - "000002\n" + + "00 0 0 0100\n" + + "03 0 0 000C\n" + + "04 0 0 000E\n" + + "02 1 0 0102\n" + + "23 1 1; coment never be used\n" + + "10 1 0 0104\n" + + "99 0 0\n" + + "; -----------\n" + "ffffffeb\n" + + "00000032\n" + "[input]\n" + "100 200\n") @@ -276,5 +271,5 @@ class TestCPUMMS: with open(str(out), 'w') as output: self.cpu.run_file(str(source), output=output) - assert out.read() == "298\n" + assert out.read() == "40000\n" diff --git a/modelmachine/tests/test_cu_abstract.py b/modelmachine/tests/test_cu_abstract.py index 3abb258..6169d0e 100644 --- a/modelmachine/tests/test_cu_abstract.py +++ b/modelmachine/tests/test_cu_abstract.py @@ -21,6 +21,11 @@ OP_COMP = 0x05 OP_STORE = 0x10 OP_UMUL, OP_UDIVMOD = 0x13, 0x14 OP_SWAP = 0x20 +OP_RMOVE = 0x20 +OP_RADD, OP_RSUB = 0x21, 0x22 +OP_RSMUL, OP_RSDIVMOD = 0x23, 0x24 +OP_RCOMP = 0x25 +OP_RUMUL, OP_RUDIVMOD = 0x33, 0x34 OP_STPUSH, 
OP_STPOP, OP_STDUP, OP_STSWAP = 0x5A, 0x5B, 0x5C, 0x5D OP_JUMP = 0x80 OP_JEQ, OP_JNEQ = 0x81, 0x82 @@ -32,33 +37,9 @@ ARITHMETIC_OPCODES = {OP_ADD, OP_SUB, OP_SMUL, OP_SDIVMOD, OP_UMUL, OP_UDIVMOD} CONDJUMP_OPCODES = {OP_JEQ, OP_JNEQ, OP_SJL, OP_SJGEQ, OP_SJLEQ, OP_SJG, OP_UJL, OP_UJGEQ, OP_UJLEQ, OP_UJG} - -def run_fetch(test_case, value, opcode, instruction_size, and_decode=True): - """Run one fetch test.""" - address = 10 - test_case.ram.put(address, value, instruction_size) - increment = instruction_size // test_case.ram.word_size - - test_case.registers.fetch.reset_mock() - test_case.registers.put.reset_mock() - - def get_register(name, size): - """Get PC.""" - assert name == "PC" - assert size == BYTE_SIZE - return address - test_case.registers.fetch.side_effect = get_register - - if and_decode: - test_case.control_unit.fetch_and_decode() - else: - test_case.control_unit.fetch_instruction(instruction_size) - test_case.registers.fetch.assert_any_call("PC", BYTE_SIZE) - test_case.registers.put.assert_has_calls([call("RI", value, WORD_SIZE), - call("PC", address + increment, - BYTE_SIZE)]) - assert test_case.control_unit.opcode == opcode - +JUMP_OPCODES = CONDJUMP_OPCODES | {OP_JUMP} +REGISTER_OPCODES = {OP_RMOVE, OP_RADD, OP_RSUB, OP_RSMUL, + OP_RSDIVMOD, OP_RCOMP, OP_RUMUL, OP_RUDIVMOD} class TestAbstractControlUnit: @@ -137,6 +118,10 @@ class TestControlUnit: arithmetic_opcodes = None condjump_opcodes = None + ir_size = 32 + operand_size = WORD_SIZE + address_size = BYTE_SIZE + def setup(self): """Init state.""" self.ram = RandomAccessMemory(WORD_SIZE, 256, 'big') @@ -154,9 +139,9 @@ class TestControlUnit: """Test internal constants.""" assert isinstance(self.control_unit, AbstractControlUnit) assert isinstance(self.control_unit, ControlUnit) - assert self.control_unit.ir_size == 32 - assert self.control_unit.operand_size == WORD_SIZE - assert self.control_unit.address_size == BYTE_SIZE + assert self.control_unit.ir_size == self.ir_size + assert 
self.control_unit.operand_size == self.operand_size + assert self.control_unit.address_size == self.address_size assert self.control_unit.OPCODE_SIZE == BYTE_SIZE assert self.control_unit.OPCODES["move"] == OP_MOVE assert self.control_unit.OPCODES["load"] == OP_LOAD @@ -201,9 +186,36 @@ class TestControlUnit: with raises(NotImplementedError): self.control_unit.write_back() + def run_fetch(self, value, opcode, instruction_size, and_decode=True, + address_size=BYTE_SIZE, ir_size=WORD_SIZE): + """Run one fetch test.""" + address = 10 + self.ram.put(address, value, instruction_size) + increment = instruction_size // self.ram.word_size + + self.registers.fetch.reset_mock() + self.registers.put.reset_mock() + + def get_register(name, size): + """Get PC.""" + assert name == "PC" + assert size == self.control_unit.address_size + return address + self.registers.fetch.side_effect = get_register + + if and_decode: + self.control_unit.fetch_and_decode() + else: + self.control_unit.fetch_instruction(instruction_size) + self.registers.fetch.assert_any_call("PC", address_size) + self.registers.put.assert_has_calls([call("RI", value, ir_size), + call("PC", address + increment, + address_size)]) + assert self.control_unit.opcode == opcode + def test_fetch_instruction(self): """Right fetch and decode is a half of business.""" - run_fetch(self, 0x01020304, 0x01, WORD_SIZE, False) + self.run_fetch(0x01020304, 0x01, WORD_SIZE, False) def test_basic_execute(self, should_move=True): """Test basic operations.""" diff --git a/modelmachine/tests/test_cu_fixed.py b/modelmachine/tests/test_cu_fixed.py index 7e4ace0..ba59481 100644 --- a/modelmachine/tests/test_cu_fixed.py +++ b/modelmachine/tests/test_cu_fixed.py @@ -17,7 +17,7 @@ from .test_cu_abstract import (BYTE_SIZE, WORD_SIZE, OP_MOVE, OP_SDIVMOD, OP_LOAD, OP_STORE, OP_SWAP, OP_JNEQ, OP_SJL, OP_SJGEQ, OP_SJLEQ, OP_SJG, OP_UJL, OP_UJGEQ, OP_UJLEQ, OP_UJG, OP_HALT, - ARITHMETIC_OPCODES, CONDJUMP_OPCODES, run_fetch) + ARITHMETIC_OPCODES, 
CONDJUMP_OPCODES) from .test_cu_abstract import TestControlUnit as TBCU class TestControlUnit3(TBCU): @@ -45,13 +45,13 @@ class TestControlUnit3(TBCU): """Right fetch and decode is a half of business.""" for opcode in self.control_unit.opcodes: self.control_unit.address1, self.control_unit.address2 = None, None - run_fetch(self, opcode << 24 | 0x020304, opcode, WORD_SIZE) + self.run_fetch(opcode << 24 | 0x020304, opcode, WORD_SIZE) assert self.control_unit.address1 == 0x02 assert self.control_unit.address2 == 0x03 assert self.control_unit.address3 == 0x04 for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes: with raises(ValueError): - run_fetch(self, opcode << 24 | 0x020304, opcode, WORD_SIZE) + self.run_fetch(opcode << 24 | 0x020304, opcode, WORD_SIZE) def test_load(self): """R1 := [A1], R2 := [A2].""" @@ -131,14 +131,14 @@ class TestControlUnit3(TBCU): self.run_cond_jump(OP_UJLEQ, False, LESS, True) self.run_cond_jump(OP_UJG, False, GREATER, False) - def test_jump_halt(self): + def test_execute_jump_halt(self): """Test for jump and halt.""" self.alu.cond_jump.reset_mock() self.alu.sub.reset_mock() self.registers.put.reset_mock() + self.control_unit.opcode = OP_JUMP self.control_unit.execute() - assert not self.alu.sub.called assert not self.registers.put.called self.alu.jump.assert_called_once_with() @@ -264,12 +264,12 @@ class TestControlUnit2(TestControlUnit3): """Right fetch and decode is a half of business.""" for opcode in self.control_unit.opcodes: self.control_unit.address1, self.control_unit.address2 = None, None - run_fetch(self, opcode << 24 | 0x0203, opcode, WORD_SIZE) + self.run_fetch(opcode << 24 | 0x0203, opcode, WORD_SIZE) assert self.control_unit.address1 == 0x02 assert self.control_unit.address2 == 0x03 for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes: with raises(ValueError): - run_fetch(self, opcode << 24 | 0x0203, opcode, WORD_SIZE) + self.run_fetch(opcode << 24 | 0x0203, opcode, WORD_SIZE) def 
test_load(self): """R1 := [A1], R2 := [A2].""" @@ -319,24 +319,6 @@ class TestControlUnit2(TestControlUnit3): assert not self.registers.put.called self.alu.cond_jump.assert_called_once_with(signed, mol, equal) - def test_execute_jump_halt(self): - """Test for jump and halt.""" - self.alu.cond_jump.reset_mock() - self.alu.sub.reset_mock() - self.registers.put.reset_mock() - - self.control_unit.opcode = OP_JUMP - self.control_unit.execute() - assert not self.alu.sub.called - assert not self.registers.put.called - self.alu.jump.assert_called_once_with() - - self.control_unit.opcode = OP_HALT - self.control_unit.execute() - assert not self.alu.sub.called - assert not self.registers.put.called - self.alu.halt.assert_called_once_with() - def test_execute_comp(self): """Test for comp.""" self.alu.cond_jump.reset_mock() @@ -472,11 +454,11 @@ class TestControlUnit1(TestControlUnit2): """Right fetch and decode is a half of business.""" for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes: with raises(ValueError): - run_fetch(self, opcode << 24, opcode, WORD_SIZE) + self.run_fetch(opcode << 24, opcode, WORD_SIZE) for opcode in self.control_unit.opcodes: self.control_unit.address = None - run_fetch(self, opcode << 24 | 0x02, opcode, WORD_SIZE) + self.run_fetch(opcode << 24 | 0x02, opcode, WORD_SIZE) assert self.control_unit.address == 0x02 def test_load(self): diff --git a/modelmachine/tests/test_cu_variable.py b/modelmachine/tests/test_cu_variable.py index f06a70c..4b500ca 100644 --- a/modelmachine/tests/test_cu_variable.py +++ b/modelmachine/tests/test_cu_variable.py @@ -4,7 +4,7 @@ from modelmachine.cu import RUNNING, HALTED from modelmachine.cu import ControlUnitV -from modelmachine.cu import ControlUnitS +from modelmachine.cu import ControlUnitM from modelmachine.memory import RegisterMemory, RandomAccessMemory from modelmachine.alu import ArithmeticLogicUnit @@ -14,9 +14,14 @@ from pytest import raises from .test_cu_abstract import (BYTE_SIZE, WORD_SIZE, 
OP_MOVE, OP_COMP, OP_SDIVMOD, OP_UDIVMOD, OP_STPUSH, OP_STPOP, + OP_LOAD, OP_STORE, OP_RMOVE, + OP_RADD, OP_RSUB, OP_RSMUL, OP_RSDIVMOD, + OP_RCOMP, OP_RUMUL, OP_RUDIVMOD, OP_STDUP, OP_STSWAP, OP_JUMP, OP_HALT, - ARITHMETIC_OPCODES, CONDJUMP_OPCODES, run_fetch) + ARITHMETIC_OPCODES, CONDJUMP_OPCODES, + JUMP_OPCODES, REGISTER_OPCODES) from .test_cu_fixed import TestControlUnit2 as TBCU2 +from .test_cu_abstract import TestControlUnit as TBCU class TestControlUnitV(TBCU2): @@ -45,23 +50,23 @@ class TestControlUnitV(TBCU2): """Right fetch and decode is a half of business.""" for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes: with raises(ValueError): - run_fetch(self, opcode, opcode, BYTE_SIZE) + self.run_fetch(opcode, opcode, BYTE_SIZE) for opcode in ARITHMETIC_OPCODES | {OP_COMP, OP_MOVE}: self.control_unit.address1, self.control_unit.address2 = None, None - run_fetch(self, opcode << 16 | 0x0203, opcode, 24) + self.run_fetch(opcode << 16 | 0x0203, opcode, 24) assert self.control_unit.address1 == 0x02 assert self.control_unit.address2 == 0x03 for opcode in CONDJUMP_OPCODES | {OP_JUMP}: self.control_unit.address1, self.control_unit.address2 = None, None - run_fetch(self, opcode << 8 | 0x02, opcode, 16) + self.run_fetch(opcode << 8 | 0x02, opcode, 16) assert self.control_unit.address1 == 0x02 assert self.control_unit.address2 == None for opcode in {OP_HALT}: self.control_unit.address1, self.control_unit.address2 = None, None - run_fetch(self, opcode, opcode, 8) + self.run_fetch(opcode, opcode, 8) assert self.control_unit.address1 == None assert self.control_unit.address2 == None @@ -175,322 +180,387 @@ class TestControlUnitV(TBCU2): assert self.registers.fetch("PC", BYTE_SIZE) == 0x01 assert self.control_unit.get_status() == HALTED +class TestControlUnitM(TBCU2): -class TestControlUnitS(TBCU2): - - """Test case for Stack Model Machine Control Unit.""" + """Test case for Address Modification Model Machine Control Unit.""" def setup(self): """Init 
state.""" super().setup() - self.ram = RandomAccessMemory(BYTE_SIZE, 256, 'big', is_protected=True) - self.control_unit = ControlUnitS(WORD_SIZE, - BYTE_SIZE, + self.ram = RandomAccessMemory(2 * BYTE_SIZE, 2 ** WORD_SIZE, 'big', is_protected=True) + self.control_unit = ControlUnitM(WORD_SIZE, + 2 * BYTE_SIZE, self.registers, self.ram, self.alu, WORD_SIZE) - assert self.control_unit.opcodes == {0x01, 0x02, 0x03, 0x04, - 0x13, 0x14, - 0x05, - 0x5A, 0x5B, 0x5C, 0x5D, + self.operand_size = WORD_SIZE + self.address_size = 2 * BYTE_SIZE + assert self.control_unit.opcodes == {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x10, 0x13, 0x14, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, + 0x33, 0x34, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x93, 0x94, 0x95, 0x96, 0x99} + def test_const(self): + super().test_const() + assert self.control_unit.OPCODES["rmove"] == OP_RMOVE + assert self.control_unit.OPCODES["radd"] == OP_RADD + assert self.control_unit.OPCODES["rsub"] == OP_RSUB + assert self.control_unit.OPCODES["rsmul"] == OP_RSMUL + assert self.control_unit.OPCODES["rsdivmod"] == OP_RSDIVMOD + assert self.control_unit.OPCODES["rcomp"] == OP_RCOMP + assert self.control_unit.OPCODES["rumul"] == OP_RUMUL + assert self.control_unit.OPCODES["rudivmod"] == OP_RUDIVMOD + + def run_fetch(self, value, opcode, instruction_size, r2=True): + """Run one fetch test.""" + address1 = 10 + address2=42 + self.ram.put(address1, value, instruction_size) + increment = instruction_size // self.ram.word_size + + self.registers.fetch.reset_mock() + self.registers.put.reset_mock() + + def get_register(name, size): + """Get PC.""" + if name == "PC": + assert size == 2 * BYTE_SIZE + return address1 + elif name=="R2": + assert size == WORD_SIZE + return address2 + else: + raise KeyError() + + self.registers.fetch.side_effect = get_register + + self.control_unit.fetch_and_decode() + if r2: + self.registers.fetch.assert_has_calls([call("PC", 2 * BYTE_SIZE), + call("R2", WORD_SIZE)]) + else: + 
self.registers.fetch.assert_any_call("PC", 2 * BYTE_SIZE) + self.registers.put.assert_has_calls([call("RI", value, WORD_SIZE), + call("PC", address1 + increment, + 2 * BYTE_SIZE)]) + assert self.control_unit.opcode == opcode + def test_fetch_and_decode(self): """Right fetch and decode is a half of business.""" for opcode in set(range(2 ** BYTE_SIZE)) - self.control_unit.opcodes: with raises(ValueError): - run_fetch(self, opcode, opcode, BYTE_SIZE) + self.run_fetch(opcode << BYTE_SIZE, opcode, 2 * BYTE_SIZE) - for opcode in ARITHMETIC_OPCODES | {OP_COMP, OP_STDUP, OP_STSWAP, - OP_HALT}: + for opcode in ARITHMETIC_OPCODES | JUMP_OPCODES | {OP_COMP, OP_LOAD, OP_STORE}: + self.control_unit.register1 = None + self.control_unit.register2 = None + self.control_unit.address = None + self.run_fetch(opcode << 24 | 0x120014, opcode, 32) + + assert self.control_unit.register1 == 'R1' + assert self.control_unit.register2 is None + assert self.control_unit.address == 0x14 + 42 + + for opcode in REGISTER_OPCODES: + self.control_unit.register1 = None + self.control_unit.register2 = None self.control_unit.address = None - run_fetch(self, opcode, opcode, BYTE_SIZE) - assert self.control_unit.address == None - for opcode in CONDJUMP_OPCODES | {OP_STPUSH, OP_STPOP, OP_JUMP}: + self.run_fetch(opcode << 8 | 0x12, opcode, 16, r2=False) + + assert self.control_unit.register1 == 'R1' + assert self.control_unit.register2 == 'R2' + assert self.control_unit.address is None + + for opcode in {OP_HALT}: + self.control_unit.register1 = None + self.control_unit.register2 = None self.control_unit.address = None - run_fetch(self, opcode << 8 | 0x02, opcode, 16) - assert self.control_unit.address == 0x02 - def test_push(self): - """Test basic stack operation.""" - self.registers.put.reset_mock() - self.registers.fetch.reset_mock() - address, value, size = 10, 123, WORD_SIZE // self.ram.word_size - self.registers.fetch.return_value = address - self.control_unit.push(value) - assert 
self.ram.fetch(address - size, WORD_SIZE) == value - self.registers.fetch.assert_called_once_with("SP", BYTE_SIZE) - self.registers.put.assert_called_once_with("SP", - address - size, - BYTE_SIZE) - - def test_pop(self): - """Test basic stack operation.""" - self.registers.put.reset_mock() - self.registers.fetch.reset_mock() - address, value, size = 10, 123, WORD_SIZE // self.ram.word_size - self.ram.put(address, value, WORD_SIZE) - self.registers.fetch.return_value = address - assert self.control_unit.pop() == value - self.registers.fetch.assert_called_once_with("SP", BYTE_SIZE) - self.registers.put.assert_called_once_with("SP", - address + size, - BYTE_SIZE) + self.run_fetch(opcode << 8 | 0x12, opcode, 16, r2=False) + + + assert self.control_unit.register1 is None + assert self.control_unit.register2 is None + assert self.control_unit.address is None def test_load(self): """R1 := [A1], R2 := [A2].""" - address, val1, val2, val3 = 10, 1, 2, 3 - stack = [] - def pop(): - """Pop mock.""" - return stack.pop() - self.control_unit.pop = create_autospec(self.control_unit.pop) - self.control_unit.pop.side_effect = pop + register1, val1 = 'R3', 123456 + register2, val2 = 'R4', 654321 + address, val3 = 10, 111111 + + def get_register(name, size): + """Get PC.""" + assert size == WORD_SIZE + if name == register1: + return val1 + elif name == register2: + return val2 + else: + raise KeyError() + + self.registers.fetch.side_effect = get_register self.control_unit.address = address + self.control_unit.register1 = register1 + self.control_unit.register2 = register2 self.ram.put(address, val3, WORD_SIZE) - for opcode in ARITHMETIC_OPCODES | {OP_COMP, OP_STSWAP}: + for opcode in ARITHMETIC_OPCODES | {OP_LOAD, OP_COMP}: + self.registers.fetch.reset_mock() self.registers.put.reset_mock() - self.control_unit.pop.reset_mock() - stack = [val1, val2] + self.control_unit.opcode = opcode self.control_unit.load() - self.control_unit.pop.assert_has_calls([call(), call()]) - 
self.registers.put.assert_has_calls([call("R1", val1, WORD_SIZE), - call("R2", val2, WORD_SIZE)], - True) + self.registers.fetch.assert_called_once_with(register1, WORD_SIZE) + self.registers.put.assert_has_calls([call("S", val1, WORD_SIZE), + call("RZ", val3, WORD_SIZE)]) - for opcode in {OP_STPOP, OP_STDUP}: + for opcode in {OP_STORE}: + self.registers.fetch.reset_mock() self.registers.put.reset_mock() - self.control_unit.pop.reset_mock() - stack = [val1] + self.control_unit.opcode = opcode self.control_unit.load() - self.control_unit.pop.assert_called_once_with() - self.registers.put.assert_called_once_with("R1", val1, WORD_SIZE) + self.registers.fetch.assert_called_once_with(register1, WORD_SIZE) + self.registers.put.assert_called_once_with("S", val1, WORD_SIZE) - for opcode in CONDJUMP_OPCODES | {OP_JUMP}: + for opcode in REGISTER_OPCODES: + self.registers.fetch.reset_mock() self.registers.put.reset_mock() - self.control_unit.pop.reset_mock() + self.control_unit.opcode = opcode self.control_unit.load() - assert not self.control_unit.pop.called - self.registers.put.assert_called_once_with("ADDR", address, BYTE_SIZE) + self.registers.fetch.assert_has_calls([call(register1, WORD_SIZE), + call(register2, WORD_SIZE)]) + self.registers.put.assert_has_calls([call("S", val1, WORD_SIZE), + call("RZ", val2, WORD_SIZE)]) - for opcode in {OP_STPUSH}: + for opcode in CONDJUMP_OPCODES | {OP_JUMP}: + self.registers.fetch.reset_mock() self.registers.put.reset_mock() - self.control_unit.pop.reset_mock() + self.control_unit.opcode = opcode self.control_unit.load() - assert not self.control_unit.pop.called - self.registers.put.assert_called_once_with("R1", val3, WORD_SIZE) + + assert not self.registers.fetch.called + self.registers.put.assert_called_once_with("ADDR", address, 2 * BYTE_SIZE) for opcode in {OP_HALT}: + self.registers.fetch.reset_mock() self.registers.put.reset_mock() - self.control_unit.pop.reset_mock() + self.control_unit.opcode = opcode + 
self.control_unit.load() - assert not self.control_unit.pop.called + assert not self.registers.fetch.called assert not self.registers.put.called def test_basic_execute(self, should_move=None): """Test basic operations.""" - super().test_basic_execute(should_move) + super().test_basic_execute(should_move=should_move) - def test_execute_stack(self): - """stpush, stpop, stdup and stswap.""" - self.alu.cond_jump.reset_mock() - self.alu.sub.reset_mock() - self.registers.put.reset_mock() - - self.control_unit.opcode = OP_STPUSH - self.control_unit.execute() - self.control_unit.opcode = OP_STPOP - self.control_unit.execute() - assert not self.alu.move.called - assert not self.alu.swap.called - - self.control_unit.opcode = OP_STDUP - self.control_unit.execute() - self.alu.move.assert_called_once_with(source="R1", dest="R2") + self.control_unit.opcode = OP_MOVE self.alu.move.reset_mock() - assert not self.alu.swap.called - - self.control_unit.opcode = OP_STSWAP self.control_unit.execute() - self.alu.swap.assert_called_once_with() - assert not self.alu.move.called - - assert not self.alu.sub.called - assert not self.registers.put.called + self.alu.move.assert_called_once_with('R2', 'S') def run_write_back(self, should, opcode): """Run write back method for specific opcode.""" - first, second, third, address = 11111111, 22222222, 3333333, 10 + + print(hex(opcode)) + + register1, next_register1, register2 = 'R5', 'R6', 'R8' + res_register1, val1 = 'S', 123456 + res_register2, val2 = 'RZ', 654321 + address, canary = 10, 0 + def get_register(name, size): - """Get result.""" - assert name in {"R1", "R2"} - assert size == WORD_SIZE - if name == "R1": - return second - elif name == "R2": - return third + """Get PC.""" + assert size == self.operand_size + if name == res_register1: + return val1 + elif name == res_register2: + return val2 + else: + raise KeyError() + self.registers.fetch.side_effect = get_register - self.registers.fetch.reset_mock() - self.ram.put(address, first, 
WORD_SIZE) self.control_unit.address = address - self.control_unit.push.reset_mock() + self.control_unit.register1 = register1 + self.control_unit.register2 = register2 + self.ram.put(address, canary, self.operand_size) + + self.registers.fetch.reset_mock() + self.registers.put.reset_mock() self.control_unit.opcode = opcode self.control_unit.write_back() - if should: - if opcode == OP_STPOP: - assert self.ram.fetch(address, WORD_SIZE) == second - elif opcode in {OP_SDIVMOD, OP_UDIVMOD, OP_STSWAP, OP_STDUP}: - self.control_unit.push.assert_has_calls([call(second), - call(third)]) - self.registers.fetch.assert_has_calls([call("R1", WORD_SIZE), - call("R2", WORD_SIZE)]) - assert self.ram.fetch(address, WORD_SIZE) == first - else: - self.control_unit.push.assert_called_once_with(second) - self.registers.fetch.assert_called_once_with("R1", WORD_SIZE) - assert self.ram.fetch(address, WORD_SIZE) == first + if should == 'two_registers': + self.registers.fetch.assert_has_calls([call(res_register1, self.operand_size), + call(res_register2, self.operand_size)]) + self.registers.put.assert_has_calls([call(register1, val1, self.operand_size), + call(next_register1, val2, self.operand_size)]) + assert self.ram.fetch(address, self.operand_size) == canary + + elif should == 'register': + self.registers.fetch.assert_called_once_with(res_register1, self.operand_size) + self.registers.put.assert_called_once_with(register1, val1, self.operand_size) + assert self.ram.fetch(address, self.operand_size) == canary + + elif should == 'memory': + self.registers.fetch.assert_called_once_with(res_register1, self.operand_size) + assert not self.registers.put.called + assert self.ram.fetch(address, self.operand_size) == val1 + else: - assert not self.control_unit.push.called assert not self.registers.fetch.called - assert self.ram.fetch(address, WORD_SIZE) == first + assert not self.registers.put.called + assert self.ram.fetch(address, self.operand_size) == canary def test_write_back(self): 
"""Test write back result to the memory.""" - self.control_unit.push = create_autospec(self.control_unit.push) - for opcode in ARITHMETIC_OPCODES | {OP_STPOP, OP_STPUSH, OP_STSWAP, - OP_STDUP,}: - self.run_write_back(True, opcode) + for opcode in {OP_SDIVMOD, OP_UDIVMOD}: + self.run_write_back('two_registers', opcode) + + for opcode in (ARITHMETIC_OPCODES | {OP_LOAD}) - {OP_SDIVMOD, OP_UDIVMOD}: + self.run_write_back('register', opcode) + + for opcode in {OP_STORE}: + self.run_write_back('memory', opcode) for opcode in (CONDJUMP_OPCODES | - {OP_HALT, - OP_JUMP, - OP_COMP}): - self.run_write_back(False, opcode) + {OP_HALT, OP_JUMP, OP_COMP}): + self.run_write_back('nothing', opcode) def test_step(self): """Test step cycle.""" - size = WORD_SIZE // 8 - self.control_unit.registers = self.registers = RegisterMemory() - self.registers.add_register("RI", WORD_SIZE) - self.registers.add_register("SP", BYTE_SIZE) - self.registers.put("SP", 0, BYTE_SIZE) + for register in {'RI', 'RZ', 'S', 'R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'RA', 'RB', 'RC', 'RD', 'RE', 'RF'}: + self.registers.add_register(register, self.operand_size) self.alu = ArithmeticLogicUnit(self.registers, self.control_unit.register_names, - WORD_SIZE, - BYTE_SIZE) + self.operand_size, + self.address_size) self.control_unit.alu = self.alu - self.ram.put(0x00, 0x5a0b, 2 * BYTE_SIZE) - self.ram.put(0x02, 0x5a0f, 2 * BYTE_SIZE) - self.ram.put(0x04, 0x01, 1 * BYTE_SIZE) - self.ram.put(0x05, 0x5c, 1 * BYTE_SIZE) - self.ram.put(0x06, 0x5a13, 2 * BYTE_SIZE) - self.ram.put(0x08, 0x05, 1 * BYTE_SIZE) - self.ram.put(0x09, 0x8617, 2 * BYTE_SIZE) - self.ram.put(0x0b, 12, WORD_SIZE) - self.ram.put(0x0f, 10, WORD_SIZE) - self.ram.put(0x13, 20, WORD_SIZE) - self.ram.put(0x17, 0x5b0b, 2 * BYTE_SIZE) - self.ram.put(0x19, 0x99, BYTE_SIZE) - self.registers.put("PC", 0, BYTE_SIZE) + canary = 0 + self.ram.put(0x0000, 0x00000100, WORD_SIZE) + self.ram.put(0x0002, 0x0300000C, WORD_SIZE) + self.ram.put(0x0004, 
0x0400000E, WORD_SIZE) + self.ram.put(0x0006, 0x02100102, WORD_SIZE) + self.ram.put(0x0008, 0x2311, 2 * BYTE_SIZE) + self.ram.put(0x0009, 0x10100104, WORD_SIZE) + self.ram.put(0x000B, 0x9900, 2 * BYTE_SIZE) + self.ram.put(0x000C, 0xffffffeb, WORD_SIZE) + self.ram.put(0x000E, 0x00000032, WORD_SIZE) + self.ram.put(0x0100, -123 % 2 ** WORD_SIZE, WORD_SIZE) + self.ram.put(0x0102, 456, WORD_SIZE) + self.ram.put(0x0104, canary, WORD_SIZE) + self.registers.put("PC", 0, 2 * BYTE_SIZE) self.control_unit.step() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x02 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size - self.control_unit.step() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x04 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - 2 * size - self.control_unit.step() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x05 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size + assert self.registers.fetch("R0", WORD_SIZE) == -123 % 2 ** WORD_SIZE + assert self.registers.fetch("R1", WORD_SIZE) == 0 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x02 + assert self.ram.fetch(0x0104, WORD_SIZE) == canary + assert self.control_unit.get_status() == RUNNING + self.control_unit.step() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x06 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - 2 * size + assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) + assert self.registers.fetch("R1", WORD_SIZE) == 0 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x04 + assert self.ram.fetch(0x0104, WORD_SIZE) == canary + assert self.control_unit.get_status() == RUNNING + self.control_unit.step() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x08 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - 3 * size + assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50 + x = 21 * 123 % 50 + assert self.registers.fetch("R1", WORD_SIZE) == x + assert self.registers.fetch("PC", 
2 * BYTE_SIZE) == 0x06 + assert self.ram.fetch(0x0104, WORD_SIZE) == canary + assert self.control_unit.get_status() == RUNNING + self.control_unit.step() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x09 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size + assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50 + assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) % 2 ** WORD_SIZE + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x08 + assert self.ram.fetch(0x0104, WORD_SIZE) == canary + assert self.control_unit.get_status() == RUNNING + self.control_unit.step() - assert self.ram.fetch(0x0b, WORD_SIZE) == 12 - assert self.registers.fetch("PC", BYTE_SIZE) == 0x17 - assert self.registers.fetch("SP", BYTE_SIZE) == 2 ** BYTE_SIZE - size + assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50 + assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) ** 2 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x09 + assert self.ram.fetch(0x0104, WORD_SIZE) == canary + assert self.control_unit.get_status() == RUNNING + self.control_unit.step() - assert self.ram.fetch(0x0b, WORD_SIZE) == 22 - assert self.registers.fetch("PC", BYTE_SIZE) == 0x19 - assert self.registers.fetch("SP", BYTE_SIZE) == 0 + assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50 + assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) ** 2 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x0b + assert self.ram.fetch(0x0104, WORD_SIZE) == (x - 456) ** 2 assert self.control_unit.get_status() == RUNNING + self.control_unit.step() - assert self.ram.fetch(0x0b, WORD_SIZE) == 22 - assert self.registers.fetch("PC", BYTE_SIZE) == 0x1a - assert self.registers.fetch("SP", BYTE_SIZE) == 0 + assert self.registers.fetch("R0", WORD_SIZE) == (21 * 123) // 50 + assert self.registers.fetch("R1", WORD_SIZE) == (x - 456) ** 2 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x0C assert self.control_unit.get_status() == HALTED def 
test_run(self): """Very simple program.""" self.control_unit.registers = self.registers = RegisterMemory() - self.registers.add_register("RI", WORD_SIZE) - self.registers.add_register("SP", BYTE_SIZE) - self.registers.put("SP", 0, BYTE_SIZE) + for register in {'RI', 'RZ', 'S', 'R0', 'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9', 'RA', 'RB', 'RC', 'RD', 'RE', 'RF'}: + self.registers.add_register(register, self.operand_size) + self.alu = ArithmeticLogicUnit(self.registers, self.control_unit.register_names, - WORD_SIZE, - BYTE_SIZE) + self.operand_size, + self.address_size) self.control_unit.alu = self.alu - self.ram.put(0x00, 0x5a0b, 2 * BYTE_SIZE) - self.ram.put(0x02, 0x5a0f, 2 * BYTE_SIZE) - self.ram.put(0x04, 0x01, 1 * BYTE_SIZE) - self.ram.put(0x05, 0x5c, 1 * BYTE_SIZE) - self.ram.put(0x06, 0x5a13, 2 * BYTE_SIZE) - self.ram.put(0x08, 0x05, 1 * BYTE_SIZE) - self.ram.put(0x09, 0x8617, 2 * BYTE_SIZE) - self.ram.put(0x0b, 12, WORD_SIZE) - self.ram.put(0x0f, 10, WORD_SIZE) - self.ram.put(0x13, 20, WORD_SIZE) - self.ram.put(0x17, 0x5b0b, 2 * BYTE_SIZE) - self.ram.put(0x19, 0x99, BYTE_SIZE) - self.registers.put("PC", 0, BYTE_SIZE) + self.ram.put(0x0000, 0x00000100, WORD_SIZE) + self.ram.put(0x0002, 0x0300000C, WORD_SIZE) + self.ram.put(0x0004, 0x0400000E, WORD_SIZE) + self.ram.put(0x0006, 0x02100102, WORD_SIZE) + self.ram.put(0x0008, 0x2311, 2 * BYTE_SIZE) + self.ram.put(0x0009, 0x10100104, WORD_SIZE) + self.ram.put(0x000B, 0x9900, 2 * BYTE_SIZE) + self.ram.put(0x000C, 0xffffffeb, WORD_SIZE) + self.ram.put(0x000E, 0x00000032, WORD_SIZE) + self.ram.put(0x0100, 0xffffff85, WORD_SIZE) + self.ram.put(0x0102, 0x000001c8, WORD_SIZE) + self.registers.put("PC", 0, 2 * BYTE_SIZE) self.control_unit.run() - assert self.ram.fetch(0x0b, WORD_SIZE) == 22 - assert self.registers.fetch("PC", BYTE_SIZE) == 0x1a - assert self.registers.fetch("SP", BYTE_SIZE) == 0 + assert self.ram.fetch(0x0104, WORD_SIZE) == 178929 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x000C 
assert self.control_unit.get_status() == HALTED def test_minimal_run(self): - """Very simple program.""" + """Minimal program.""" self.control_unit.registers = self.registers = RegisterMemory() - self.registers.add_register("RI", WORD_SIZE) - self.registers.add_register("SP", BYTE_SIZE) - self.registers.put("SP", 0, BYTE_SIZE) + self.registers.add_register('RI', self.operand_size) + self.alu = ArithmeticLogicUnit(self.registers, self.control_unit.register_names, - WORD_SIZE, - BYTE_SIZE) + self.operand_size, + self.address_size) self.control_unit.alu = self.alu - self.ram.put(0x00, 0x99, BYTE_SIZE) - self.registers.put("PC", 0, BYTE_SIZE) + self.ram.put(0x00, 0x9900, 2 * BYTE_SIZE) + self.registers.put("PC", 0, 2 * BYTE_SIZE) self.control_unit.run() - assert self.registers.fetch("PC", BYTE_SIZE) == 0x01 - assert self.registers.fetch("SP", BYTE_SIZE) == 0 + assert self.registers.fetch("PC", 2 * BYTE_SIZE) == 0x01 assert self.control_unit.get_status() == HALTED + diff --git a/modelmachine/tests/test_numeric.py b/modelmachine/tests/test_numeric.py index df42766..b96f034 100644 --- a/modelmachine/tests/test_numeric.py +++ b/modelmachine/tests/test_numeric.py @@ -211,3 +211,21 @@ class TestNumeric: assert dic[self.first] == 10 assert dic[self.second] == 11 assert dic[third] == 10 + + def test_getitem(self): + """Test if we can get Integer bits.""" + assert self.first[0] == Integer(0, 1, False) + assert self.first[1] == Integer(1, 1, False) + assert self.first[2] == Integer(0, 1, False) + assert self.first[3] == Integer(1, 1, False) + assert self.first[4] == Integer(0, 1, False) + assert self.first[5] == Integer(0, 1, False) + assert self.second[0] == Integer(0, 1, False) + assert self.second[1] == Integer(0, 1, False) + assert self.second[2] == Integer(1, 1, False) + assert self.second[3] == Integer(1, 1, False) + assert self.second[4] == Integer(0, 1, False) + assert self.second[5] == Integer(0, 1, False) + assert self.first[0:6] == Integer(10, 6, False) + assert 
self.first[:6] == Integer(10, 6, False) + assert self.first[3:] == Integer(1, 32 - 3, False)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 3, "issue_text_score": 3, "test_score": 3 }, "num_modified_files": 10 }
0.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 iniconfig==2.1.0 -e git+https://github.com/cmc-python/modelmachine.git@fa9275d64498c7cbe24f02357bbb1bc971670756#egg=modelmachine packaging==24.2 pluggy==1.5.0 pytest==8.3.5 tomli==2.2.1
name: modelmachine channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/modelmachine
[ "modelmachine/tests/test_numeric.py::TestNumeric::test_index" ]
[ "modelmachine/tests/test_cpu.py::TestAbstractCPU::test_load_program", "modelmachine/tests/test_cpu.py::TestAbstractCPU::test_print_result", "modelmachine/tests/test_cpu.py::TestAbstractCPU::test_run_file", "modelmachine/tests/test_cpu.py::TestCPUMM3::test_smoke", "modelmachine/tests/test_cpu.py::TestCPUMM2::test_smoke", "modelmachine/tests/test_cpu.py::TestCPUMMV::test_smoke", "modelmachine/tests/test_cpu.py::TestCPUMM1::test_smoke", "modelmachine/tests/test_cpu.py::TestCPUMMM::test_smoke", "modelmachine/tests/test_cu_abstract.py::TestAbstractControlUnit::test_get_status", "modelmachine/tests/test_cu_abstract.py::TestAbstractControlUnit::test_abstract_methods", "modelmachine/tests/test_cu_abstract.py::TestAbstractControlUnit::test_step_and_run", "modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_const", "modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_fetch_and_decode", "modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_load", "modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_write_back", "modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_fetch_instruction", "modelmachine/tests/test_cu_abstract.py::TestControlUnit::test_basic_execute", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_const", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_fetch_instruction", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_fetch_and_decode", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_load", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_basic_execute", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_execute_cond_jumps", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_execute_jump_halt", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_write_back", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_step", "modelmachine/tests/test_cu_fixed.py::TestControlUnit3::test_run", 
"modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_const", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_fetch_instruction", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_basic_execute", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_execute_cond_jumps", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_execute_jump_halt", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_fetch_and_decode", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_load", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_execute_comp", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_write_back", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_step", "modelmachine/tests/test_cu_fixed.py::TestControlUnit2::test_run", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_const", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_fetch_instruction", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_cond_jumps", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_jump_halt", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_fetch_and_decode", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_load", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_basic_execute", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_comp", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_execute_load_store_swap", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_write_back", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_step", "modelmachine/tests/test_cu_fixed.py::TestControlUnit1::test_run", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_const", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_fetch_instruction", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_basic_execute", 
"modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_execute_cond_jumps", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_execute_jump_halt", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_execute_comp", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_write_back", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_fetch_and_decode", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_load", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_step", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_run", "modelmachine/tests/test_cu_variable.py::TestControlUnitV::test_minimal_run", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_fetch_instruction", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_execute_cond_jumps", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_execute_jump_halt", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_execute_comp", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_const", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_fetch_and_decode", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_load", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_basic_execute", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_write_back", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_step", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_run", "modelmachine/tests/test_cu_variable.py::TestControlUnitM::test_minimal_run", "modelmachine/tests/test_numeric.py::TestNumeric::test_init", "modelmachine/tests/test_numeric.py::TestNumeric::test_check_compability", "modelmachine/tests/test_numeric.py::TestNumeric::test_get_value", "modelmachine/tests/test_numeric.py::TestNumeric::test_add", "modelmachine/tests/test_numeric.py::TestNumeric::test_mul", 
"modelmachine/tests/test_numeric.py::TestNumeric::test_sub", "modelmachine/tests/test_numeric.py::TestNumeric::test_eq", "modelmachine/tests/test_numeric.py::TestNumeric::test_divmod", "modelmachine/tests/test_numeric.py::TestNumeric::test_get_data", "modelmachine/tests/test_numeric.py::TestNumeric::test_hash", "modelmachine/tests/test_numeric.py::TestNumeric::test_getitem" ]
[]
[]
Do What The F*ck You Want To Public License
364
Pylons__webob-229
87a1254c1818859c066268755621254d2ab086a0
2016-01-03 07:23:46
9400c049d05c8ba350daf119aa16ded24ece31f6
diff --git a/contributing.md b/contributing.md deleted file mode 100644 index ad0ae99..0000000 --- a/contributing.md +++ /dev/null @@ -1,111 +0,0 @@ -Contributing -============ - -All projects under the Pylons Projects, including this one, follow the -guidelines established at [How to -Contribute](http://www.pylonsproject.org/community/how-to-contribute) and -[Coding Style and -Standards](http://docs.pylonsproject.org/en/latest/community/codestyle.html). - -You can contribute to this project in several ways. - -* [File an Issue on GitHub](https://github.com/Pylons/webob/issues) -* Fork this project and create a branch with your suggested change. When ready, - submit a pull request for consideration. [GitHub - Flow](https://guides.github.com/introduction/flow/index.html) describes the - workflow process and why it's a good practice. -* Join the IRC channel #pyramid on irc.freenode.net. - - -Git Branches ------------- -Git branches and their purpose and status at the time of this writing are -listed below. - -* [master](https://github.com/Pylons/webob/) - The branch on which further -development takes place. The default branch on GitHub. -* [1.5-branch](https://github.com/Pylons/webob/tree/1.5-branch) - The branch -classified as "stable" or "latest". Actively maintained. -* [1.4-branch](https://github.com/Pylons/webob/tree/1.4-branch) - The oldest -actively maintained and stable branch. - -Older branches are not actively maintained. In general, two stable branches and -one or two development branches are actively maintained. - - -Running Tests -------------- - -*Note:* This section needs better instructions. - -Run `tox` from within your checkout. This will run the tests across all -supported systems and attempt to build the docs. - -To run the tests for Python 2.x only: - - $ tox py2-cover - -To build the docs for Python 3.x only: - - $ tox py3-docs - -See the `tox.ini` file for details. 
- - -Building documentation for a Pylons Project project ---------------------------------------------------- - -*Note:* These instructions might not work for Windows users. Suggestions to -improve the process for Windows users are welcome by submitting an issue or a -pull request. - -1. Fork the repo on GitHub by clicking the [Fork] button. -2. Clone your fork into a workspace on your local machine. - - git clone [email protected]:<username>/webob.git - -3. Add a git remote "upstream" for the cloned fork. - - git remote add upstream [email protected]:Pylons/webob.git - -4. Set an environment variable to your virtual environment. - - # Mac and Linux - $ export VENV=~/hack-on-webob/env - - # Windows - set VENV=c:\hack-on-webob\env - -5. Try to build the docs in your workspace. - - # Mac and Linux - $ make clean html SPHINXBUILD=$VENV/bin/sphinx-build - - # Windows - c:\> make clean html SPHINXBUILD=%VENV%\bin\sphinx-build - - If successful, then you can make changes to the documentation. You can - load the built documentation in the `/_build/html/` directory in a web - browser. - -6. From this point forward, follow the typical [git - workflow](https://help.github.com/articles/what-is-a-good-git-workflow/). - Start by pulling from the upstream to get the most current changes. - - git pull upstream master - -7. Make a branch, make changes to the docs, and rebuild them as indicated in - step 5. To speed up the build process, you can omit `clean` from the above - command to rebuild only those pages that depend on the files you have - changed. - -8. Once you are satisfied with your changes and the documentation builds - successfully without errors or warnings, then git commit and push them to - your "origin" repository on GitHub. - - git commit -m "commit message" - git push -u origin --all # first time only, subsequent can be just 'git push'. - -9. Create a [pull request](https://help.github.com/articles/using-pull-requests/). - -10. 
Repeat the process starting from Step 6. \ No newline at end of file diff --git a/docs/do-it-yourself.txt b/docs/do-it-yourself.txt index 3b65c7d..381051c 100644 --- a/docs/do-it-yourself.txt +++ b/docs/do-it-yourself.txt @@ -324,7 +324,7 @@ Now we'll show a basic application. Just a hello world application for now. No ... return 'Hello %s!' % req.params['name'] ... elif req.method == 'GET': ... return '''<form method="POST"> - ... Your name: <input type="text" name="name"> + ... You're name: <input type="text" name="name"> ... <input type="submit"> ... </form>''' >>> hello_world = Router() @@ -342,7 +342,7 @@ Now let's test that application: Content-Length: 131 <BLANKLINE> <form method="POST"> - Your name: <input type="text" name="name"> + You're name: <input type="text" name="name"> <input type="submit"> </form> >>> req.method = 'POST' @@ -421,7 +421,7 @@ Here's the hello world: ... self.request = req ... def get(self): ... return '''<form method="POST"> - ... Your name: <input type="text" name="name"> + ... You're name: <input type="text" name="name"> ... <input type="submit"> ... </form>''' ... def post(self): @@ -442,7 +442,7 @@ We'll run the same test as before: Content-Length: 131 <BLANKLINE> <form method="POST"> - Your name: <input type="text" name="name"> + You're name: <input type="text" name="name"> <input type="submit"> </form> >>> req.method = 'POST' @@ -462,7 +462,7 @@ You can use hard-coded links in your HTML, but this can have problems. Relative The base URL using SCRIPT_NAME is ``req.application_url``. So, if we have access to the request we can make a URL. But what if we don't have access? -We can use thread-local variables to make it easy for any function to get access to the current request. A "thread-local" variable is a variable whose value is tracked separately for each thread, so if there are multiple requests in different threads, their requests won't clobber each other. 
+We can use thread-local variables to make it easy for any function to get access to the currect request. A "thread-local" variable is a variable whose value is tracked separately for each thread, so if there are multiple requests in different threads, their requests won't clobber each other. The basic means of using a thread-local variable is ``threading.local()``. This creates a blank object that can have thread-local attributes assigned to it. I find the best way to get *at* a thread-local value is with a function, as this makes it clear that you are fetching the object, as opposed to getting at some global object. diff --git a/docs/index.txt b/docs/index.txt index 49575a8..d96240b 100644 --- a/docs/index.txt +++ b/docs/index.txt @@ -1,5 +1,3 @@ -.. _index: - WebOb +++++ diff --git a/webob/descriptors.py b/webob/descriptors.py index 505a2b6..5fd26eb 100644 --- a/webob/descriptors.py +++ b/webob/descriptors.py @@ -138,6 +138,9 @@ def header_getter(header, rfc_section): def fset(r, value): fdel(r) if value is not None: + if '\n' in value or '\r' in value: + raise ValueError('Header value may not contain control characters') + if isinstance(value, text_type) and not PY3: value = value.encode('latin-1') r._headerlist.append((header, value)) diff --git a/webob/exc.py b/webob/exc.py index a67a867..57a81b5 100644 --- a/webob/exc.py +++ b/webob/exc.py @@ -481,6 +481,9 @@ ${html_comment}''') detail=detail, headers=headers, comment=comment, body_template=body_template) if location is not None: + if '\n' in location or '\r' in location: + raise ValueError('Control characters are not allowed in location') + self.location = location if add_slash: raise TypeError(
Possible HTTP Response Splitting Vulnerability Hi, Please review the published advisory, probably it's in the API WebOb which is not documented here: http://docs.webob.org/en/latest/api/exceptions.html Probably there are other WebOb applications with similar issues. Here is the advisory: http://www.zeroscience.mk/en/vulnerabilities/ZSL-2015-5267.php Thanks
Pylons/webob
diff --git a/tests/test_descriptors.py b/tests/test_descriptors.py index 7bf229f..eb3d316 100644 --- a/tests/test_descriptors.py +++ b/tests/test_descriptors.py @@ -155,6 +155,14 @@ def test_header_getter_fset_text(): desc.fset(resp, text_('avalue')) eq_(desc.fget(resp), 'avalue') +def test_header_getter_fset_text_control_chars(): + from webob.compat import text_ + from webob.descriptors import header_getter + from webob import Response + resp = Response('aresp') + desc = header_getter('AHEADER', '14.3') + assert_raises(ValueError, desc.fset, resp, text_('\n')) + def test_header_getter_fdel(): from webob.descriptors import header_getter from webob import Response diff --git a/tests/test_exc.py b/tests/test_exc.py index 4f7c238..dcb1fed 100644 --- a/tests/test_exc.py +++ b/tests/test_exc.py @@ -259,6 +259,17 @@ def test_HTTPMove_location_not_none(): m = webob_exc._HTTPMove(location='http://example.com') assert_equal( m( environ, start_response ), [] ) +def test_HTTPMove_location_newlines(): + environ = { + 'wsgi.url_scheme': 'HTTP', + 'SERVER_NAME': 'localhost', + 'SERVER_PORT': '80', + 'REQUEST_METHOD': 'HEAD', + 'PATH_INFO': '/', + } + assert_raises(ValueError, webob_exc._HTTPMove, + location='http://example.com\r\nX-Test: false') + def test_HTTPMove_add_slash_and_location(): def start_response(status, headers, exc_info=None): pass
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 4 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "coverage", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work nose==1.3.7 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/Pylons/webob.git@87a1254c1818859c066268755621254d2ab086a0#egg=WebOb
name: webob channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - nose==1.3.7 prefix: /opt/conda/envs/webob
[ "tests/test_descriptors.py::test_header_getter_fset_text_control_chars", "tests/test_exc.py::test_HTTPMove_location_newlines" ]
[]
[ "tests/test_descriptors.py::test_environ_getter_docstring", "tests/test_descriptors.py::test_environ_getter_nodefault_keyerror", "tests/test_descriptors.py::test_environ_getter_nodefault_fget", "tests/test_descriptors.py::test_environ_getter_nodefault_fdel", "tests/test_descriptors.py::test_environ_getter_default_fget", "tests/test_descriptors.py::test_environ_getter_default_fset", "tests/test_descriptors.py::test_environ_getter_default_fset_none", "tests/test_descriptors.py::test_environ_getter_default_fdel", "tests/test_descriptors.py::test_environ_getter_rfc_section", "tests/test_descriptors.py::test_upath_property_fget", "tests/test_descriptors.py::test_upath_property_fset", "tests/test_descriptors.py::test_header_getter_doc", "tests/test_descriptors.py::test_header_getter_fget", "tests/test_descriptors.py::test_header_getter_fset", "tests/test_descriptors.py::test_header_getter_fset_none", "tests/test_descriptors.py::test_header_getter_fset_text", "tests/test_descriptors.py::test_header_getter_fdel", "tests/test_descriptors.py::test_header_getter_unicode_fget_none", "tests/test_descriptors.py::test_header_getter_unicode_fget", "tests/test_descriptors.py::test_header_getter_unicode_fset_none", "tests/test_descriptors.py::test_header_getter_unicode_fset", "tests/test_descriptors.py::test_header_getter_unicode_fdel", "tests/test_descriptors.py::test_converter_not_prop", "tests/test_descriptors.py::test_converter_with_name_docstring", "tests/test_descriptors.py::test_converter_with_name_fget", "tests/test_descriptors.py::test_converter_with_name_fset", "tests/test_descriptors.py::test_converter_without_name_fget", "tests/test_descriptors.py::test_converter_without_name_fset", "tests/test_descriptors.py::test_converter_none_for_wrong_type", "tests/test_descriptors.py::test_converter_delete", "tests/test_descriptors.py::test_list_header", "tests/test_descriptors.py::test_parse_list_single", "tests/test_descriptors.py::test_parse_list_multiple", 
"tests/test_descriptors.py::test_parse_list_none", "tests/test_descriptors.py::test_parse_list_unicode_single", "tests/test_descriptors.py::test_parse_list_unicode_multiple", "tests/test_descriptors.py::test_serialize_list", "tests/test_descriptors.py::test_serialize_list_string", "tests/test_descriptors.py::test_serialize_list_unicode", "tests/test_descriptors.py::test_converter_date", "tests/test_descriptors.py::test_converter_date_docstring", "tests/test_descriptors.py::test_date_header_fget_none", "tests/test_descriptors.py::test_date_header_fset_fget", "tests/test_descriptors.py::test_date_header_fdel", "tests/test_descriptors.py::test_deprecated_property", "tests/test_descriptors.py::test_parse_etag_response", "tests/test_descriptors.py::test_parse_etag_response_quoted", "tests/test_descriptors.py::test_parse_etag_response_is_none", "tests/test_descriptors.py::test_serialize_etag_response", "tests/test_descriptors.py::test_serialize_if_range_string", "tests/test_descriptors.py::test_serialize_if_range_unicode", "tests/test_descriptors.py::test_serialize_if_range_datetime", "tests/test_descriptors.py::test_serialize_if_range_other", "tests/test_descriptors.py::test_parse_range_none", "tests/test_descriptors.py::test_parse_range_type", "tests/test_descriptors.py::test_parse_range_values", "tests/test_descriptors.py::test_serialize_range_none", "tests/test_descriptors.py::test_serialize_range", "tests/test_descriptors.py::test_parse_int_none", "tests/test_descriptors.py::test_parse_int_emptystr", "tests/test_descriptors.py::test_parse_int", "tests/test_descriptors.py::test_parse_int_invalid", "tests/test_descriptors.py::test_parse_int_safe_none", "tests/test_descriptors.py::test_parse_int_safe_emptystr", "tests/test_descriptors.py::test_parse_int_safe", "tests/test_descriptors.py::test_parse_int_safe_invalid", "tests/test_descriptors.py::test_serialize_int", "tests/test_descriptors.py::test_parse_content_range_none", 
"tests/test_descriptors.py::test_parse_content_range_emptystr", "tests/test_descriptors.py::test_parse_content_range_length", "tests/test_descriptors.py::test_parse_content_range_start", "tests/test_descriptors.py::test_parse_content_range_stop", "tests/test_descriptors.py::test_serialize_content_range_none", "tests/test_descriptors.py::test_serialize_content_range_emptystr", "tests/test_descriptors.py::test_serialize_content_range_invalid", "tests/test_descriptors.py::test_serialize_content_range_asterisk", "tests/test_descriptors.py::test_serialize_content_range_defined", "tests/test_descriptors.py::test_parse_auth_params_leading_capital_letter", "tests/test_descriptors.py::test_parse_auth_params_trailing_capital_letter", "tests/test_descriptors.py::test_parse_auth_params_doublequotes", "tests/test_descriptors.py::test_parse_auth_params_multiple_values", "tests/test_descriptors.py::test_parse_auth_params_truncate_on_comma", "tests/test_descriptors.py::test_parse_auth_params_emptystr", "tests/test_descriptors.py::test_parse_auth_params_bad_whitespace", "tests/test_descriptors.py::test_authorization2", "tests/test_descriptors.py::test_parse_auth_none", "tests/test_descriptors.py::test_parse_auth_emptystr", "tests/test_descriptors.py::test_parse_auth_basic", "tests/test_descriptors.py::test_parse_auth_basic_quoted", "tests/test_descriptors.py::test_parse_auth_basic_quoted_multiple_unknown", "tests/test_descriptors.py::test_parse_auth_basic_quoted_known_multiple", "tests/test_descriptors.py::test_serialize_auth_none", "tests/test_descriptors.py::test_serialize_auth_emptystr", "tests/test_descriptors.py::test_serialize_auth_basic_quoted", "tests/test_descriptors.py::test_serialize_auth_digest_multiple", "tests/test_descriptors.py::test_serialize_auth_digest_tuple", "tests/test_descriptors.py::TestEnvironDecoder::test_default_fdel", "tests/test_descriptors.py::TestEnvironDecoder::test_default_fget", "tests/test_descriptors.py::TestEnvironDecoder::test_default_fset", 
"tests/test_descriptors.py::TestEnvironDecoder::test_default_fset_none", "tests/test_descriptors.py::TestEnvironDecoder::test_docstring", "tests/test_descriptors.py::TestEnvironDecoder::test_fget_nonascii", "tests/test_descriptors.py::TestEnvironDecoder::test_fset_nonascii", "tests/test_descriptors.py::TestEnvironDecoder::test_nodefault_fdel", "tests/test_descriptors.py::TestEnvironDecoder::test_nodefault_fget", "tests/test_descriptors.py::TestEnvironDecoder::test_nodefault_keyerror", "tests/test_descriptors.py::TestEnvironDecoder::test_rfc_section", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fdel", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fget", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fget_nonascii", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fset", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_default_fset_none", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_docstring", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_fget_nonascii", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_fset_nonascii", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_nodefault_fdel", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_nodefault_fget", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_nodefault_keyerror", "tests/test_descriptors.py::TestEnvironDecoderLegacy::test_rfc_section", "tests/test_exc.py::test_noescape_null", "tests/test_exc.py::test_noescape_not_basestring", "tests/test_exc.py::test_noescape_unicode", "tests/test_exc.py::test_strip_tags_empty", "tests/test_exc.py::test_strip_tags_newline_to_space", "tests/test_exc.py::test_strip_tags_zaps_carriage_return", "tests/test_exc.py::test_strip_tags_br_to_newline", "tests/test_exc.py::test_strip_tags_zaps_comments", "tests/test_exc.py::test_strip_tags_zaps_tags", "tests/test_exc.py::test_HTTPException", 
"tests/test_exc.py::test_exception_with_unicode_data", "tests/test_exc.py::test_WSGIHTTPException_headers", "tests/test_exc.py::test_WSGIHTTPException_w_body_template", "tests/test_exc.py::test_WSGIHTTPException_w_empty_body", "tests/test_exc.py::test_WSGIHTTPException___str__", "tests/test_exc.py::test_WSGIHTTPException_plain_body_no_comment", "tests/test_exc.py::test_WSGIHTTPException_html_body_w_comment", "tests/test_exc.py::test_WSGIHTTPException_generate_response", "tests/test_exc.py::test_WSGIHTTPException_call_w_body", "tests/test_exc.py::test_WSGIHTTPException_wsgi_response", "tests/test_exc.py::test_WSGIHTTPException_exception_newstyle", "tests/test_exc.py::test_WSGIHTTPException_exception_no_newstyle", "tests/test_exc.py::test_HTTPOk_head_of_proxied_head", "tests/test_exc.py::test_HTTPMove", "tests/test_exc.py::test_HTTPMove_location_not_none", "tests/test_exc.py::test_HTTPMove_add_slash_and_location", "tests/test_exc.py::test_HTTPMove_call_add_slash", "tests/test_exc.py::test_HTTPMove_call_query_string", "tests/test_exc.py::test_HTTPExceptionMiddleware_ok", "tests/test_exc.py::test_HTTPExceptionMiddleware_exception", "tests/test_exc.py::test_HTTPExceptionMiddleware_exception_exc_info_none", "tests/test_exc.py::test_status_map_is_deterministic" ]
[]
null
365
pika__pika-685
8be81a21d8b554ee9af4fae08907956e5b8b138f
2016-01-04 01:56:37
f73f9bbaddd90b03583a6693f6158e56fbede948
vitaly-krugl: Can anyone help me figure out why `assert_any_call` is failing in the python 2.6 build https://travis-ci.org/pika/pika/jobs/100035773 ? Many thanks CC @gst, @gmr gst: having a look into that.. gst: can't reproduce directly on my side.. gst: the only error I can trigger (in python2.6 but I think it's the same with others versions), is this one : ``` Test that poll() is properly restarted after receiving EINTR error. ... FAIL ====================================================================== FAIL: Test that poll() is properly restarted after receiving EINTR error. ---------------------------------------------------------------------- Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/mock.py", line 1201, in patched return func(*args, **keywargs) File "/home/gstarck/work/public/python/pika/tests/unit/select_connection_ioloop_tests.py", line 396, in test_eintr self.assertEqual(is_resumable_mock.call_count, 1) AssertionError: 0 != 1 -------------------- >> begin captured logging << -------------------- pika.adapters.select_connection: DEBUG: Using SelectPoller pika.adapters.select_connection: DEBUG: Using SelectPoller pika.adapters.select_connection: DEBUG: Starting IOLoop pika.adapters.select_connection: DEBUG: Stopping IOLoop --------------------- >> end captured logging << --------------------- ``` I can trigger it, "simply" by executing many instances of the test at the same time, with something like this : `$ for i in $(seq 100) ; do ( nosetests -x tests/acceptance/blocking_adapter_test.py:TestUnroutableMessagesReturnedInNonPubackMode &>/tmp/res$i || echo res$i failed) & done` this also triggers quite a lot of Timed out errors which is expected given the load generated by the execution of so many tests in // .. 
gst: strange, when you give a look at the debug output : ``` root: DEBUG: ZZZ self.connection.callbacks.process.call_args_list: [call(0, '_on_connection_error', <pika.connection.Connection object at 0x2f92dd0>, <pika.connection.Connection object at 0x2f92dd0>, Client was disconnected at a connection stage indicating a probable denial of access to the specified virtual host: (1, 'error text')), ``` you see the mock has well been called with the, as far as i see, expected arguments.. strange.. gst: For refs/archive the actual error: ``` ====================================================================== FAIL: on_disconnect invokes `ON_CONNECTION_ERROR` with `ProbableAccessDeniedError` and `ON_CONNECTION_CLOSED` callbacks ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/travis/build/pika/pika/tests/unit/connection_tests.py", line 228, in test_on_disconnect_invokes_access_on_connection_error_and_closed mock.ANY) File "/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/mock/mock.py", line 999, in assert_any_call ), cause) File "/home/travis/virtualenv/python2.6.9/lib/python2.6/site-packages/six.py", line 718, in raise_from raise value AssertionError: mock(0, '_on_connection_error', <pika.connection.Connection object at 0x2ec4dd0>, <pika.connection.Connection object at 0x2ec4dd0>, <ANY>) call not found -------------------- >> begin captured logging << -------------------- pika.callback: DEBUG: Added: {'callback': <bound method Connection._on_connection_error of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': False, 'arguments': None} pika.callback: DEBUG: Added: {'callback': <bound method Connection._on_connection_start of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 1} pika.callback: DEBUG: Added: {'callback': <bound method Connection._on_connection_closed of <pika.connection.Connection object 
at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 1} pika.connection: WARNING: Disconnected from RabbitMQ at localhost:5672 from_adapter=True (1): error text pika.connection: ERROR: Socket closed while tuning the connection indicating a probable permission error when accessing a virtual host pika.connection: ERROR: Connection setup failed due to Client was disconnected at a connection stage indicating a probable denial of access to the specified virtual host: (1, 'error text') pika.callback: DEBUG: Incremented callback reference counter: {'callback': <bound method Connection._on_connection_start of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 2} pika.callback: DEBUG: Incremented callback reference counter: {'callback': <bound method Connection._on_connection_closed of <pika.connection.Connection object at 0x2ec4dd0>>, 'only': None, 'one_shot': True, 'arguments': None, 'calls': 2} root: DEBUG: ZZZ self.connection.callbacks.process.call_args_list: [call(0, '_on_connection_error', <pika.connection.Connection object at 0x2ec4dd0>, <pika.connection.Connection object at 0x2ec4dd0>, Client was disconnected at a connection stage indicating a probable denial of access to the specified virtual host: (1, 'error text')), call(0, '_on_connection_closed', <pika.connection.Connection object at 0x2ec4dd0>, <pika.connection.Connection object at 0x2ec4dd0>, 1, 'error text')] --------------------- >> end captured logging << --------------------- ``` does it reproduce on travis ? gst: could be due to mock version.. gst: @vitaly-krugl I'd like to know the mock version used by travis, how can I know ? in fact: it would be good if before the execution of the tests there was a "pip freeze" executed, so that we can know all versions which are in use by travis.. vitaly-krugl: @gst, I was off the grid for a few days. Just got back today. Thanks for looking at this problem. 
I don't know which version of mock it uses, but https://github.com/pika/pika/blob/master/test-requirements.txt specifies `mock` without a version. This would imply that it would load the most recent version. I like your idea about adding `pip freeze`. Perhaps this is something that could be added via https://github.com/pika/pika/blob/master/.travis.yml? vitaly-krugl: @gst, I am able to reproduce these problems with mock 1.3.0 on python 2.7.10. I've had nothing but trouble with the latest mock. vitaly-krugl: @gst, getting rid of the spec arg in the mock allows the tests to pass now. I also switched to a context manager for patching, but I don't think that the context manager is important in this case. However, the spec/autospec/spec_set are really valuable, and it's a shame that I can't get the tests to pass using them. gst: @vitaly-krugl : good to know about the reproduce :) and yes for having pip freeze output it's effectively within `.travis.yml` ; could be put after the ` - pip install -r test-requirements.txt` in install section.. vitaly-krugl: @gst, would you like to submit a PR with the `pip freeze` change? thx gst: here it is.. https://github.com/pika/pika/pull/689
diff --git a/docs/version_history.rst b/docs/version_history.rst index 349fbe7..06530b7 100644 --- a/docs/version_history.rst +++ b/docs/version_history.rst @@ -10,6 +10,8 @@ Next Release - In BaseConnection.close, call _handle_ioloop_stop only if the connection is already closed to allow the asynchronous close operation to complete gracefully. + - Pass error information from failed socket connection to user callbacks + on_open_error_callback and on_close_callback with result_code=-1. 0.10.0 2015-09-02 ----------------- diff --git a/pika/adapters/base_connection.py b/pika/adapters/base_connection.py index 4bb436c..87afc99 100644 --- a/pika/adapters/base_connection.py +++ b/pika/adapters/base_connection.py @@ -10,7 +10,6 @@ import ssl import pika.compat from pika import connection -from pika import exceptions try: SOL_TCP = socket.SOL_TCP @@ -52,10 +51,10 @@ class BaseConnection(connection.Connection): :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Method to call on connection open - :param on_open_error_callback: Method to call if the connection cant - be opened - :type on_open_error_callback: method - :param method on_close_callback: Method to call on connection close + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param object ioloop: IOLoop object to use :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :raises: RuntimeError @@ -152,38 +151,9 @@ class BaseConnection(connection.Connection): def _adapter_disconnect(self): """Invoked if the connection is being told to disconnect""" try: - self._remove_heartbeat() self._cleanup_socket() - self._check_state_on_disconnect() finally: - # Ensure proper cleanup since _check_state_on_disconnect may raise - # an exception 
self._handle_ioloop_stop() - self._init_connection_state() - - def _check_state_on_disconnect(self): - """Checks to see if we were in opening a connection with RabbitMQ when - we were disconnected and raises exceptions for the anticipated - exception types. - - """ - if self.connection_state == self.CONNECTION_PROTOCOL: - LOGGER.error('Incompatible Protocol Versions') - raise exceptions.IncompatibleProtocolError - elif self.connection_state == self.CONNECTION_START: - LOGGER.error("Socket closed while authenticating indicating a " - "probable authentication error") - raise exceptions.ProbableAuthenticationError - elif self.connection_state == self.CONNECTION_TUNE: - LOGGER.error("Socket closed while tuning the connection indicating " - "a probable permission error when accessing a virtual " - "host") - raise exceptions.ProbableAccessDeniedError - elif self.is_open: - LOGGER.warning("Socket closed when connection was open") - elif not self.is_closed and not self.is_closing: - LOGGER.warning('Unknown state on disconnect: %i', - self.connection_state) def _cleanup_socket(self): """Close the socket cleanly""" @@ -272,11 +242,14 @@ class BaseConnection(connection.Connection): """ if not error_value: return None + if hasattr(error_value, 'errno'): # Python >= 2.6 return error_value.errno - elif error_value is not None: + else: + # TODO: this doesn't look right; error_value.args[0] ??? Could + # probably remove this code path since pika doesn't test against + # Python 2.5 return error_value[0] # Python <= 2.5 - return None def _flush_outbound(self): """Have the state manager schedule the necessary I/O. @@ -291,21 +264,6 @@ class BaseConnection(connection.Connection): # called), etc., etc., etc. 
self._manage_event_state() - def _handle_disconnect(self): - """Called internally when the socket is disconnected already - """ - try: - self._adapter_disconnect() - except (exceptions.ProbableAccessDeniedError, - exceptions.ProbableAuthenticationError) as error: - LOGGER.error('disconnected due to %r', error) - self.callbacks.process(0, - self.ON_CONNECTION_ERROR, - self, - self, error) - - self._on_connection_closed(None, True) - def _handle_ioloop_stop(self): """Invoked when the connection is closed to determine if the IOLoop should be stopped or not. @@ -323,9 +281,10 @@ class BaseConnection(connection.Connection): :param int|object error_value: The inbound error """ - if 'timed out' in str(error_value): - raise socket.timeout + # TODO: doesn't seem right: docstring defines error_value as int|object, + # but _get_error_code expects a falsie or an exception-like object error_code = self._get_error_code(error_value) + if not error_code: LOGGER.critical("Tried to handle an error where no error existed") return @@ -342,6 +301,8 @@ class BaseConnection(connection.Connection): elif self.params.ssl and isinstance(error_value, ssl.SSLError): if error_value.args[0] == ssl.SSL_ERROR_WANT_READ: + # TODO: doesn't seem right: this logic updates event state, but + # the logic at the bottom unconditionaly disconnects anyway. self.event_state = self.READ elif error_value.args[0] == ssl.SSL_ERROR_WANT_WRITE: self.event_state = self.WRITE @@ -353,20 +314,21 @@ class BaseConnection(connection.Connection): LOGGER.error("Socket Error: %s", error_code) # Disconnect from our IOLoop and let Connection know what's up - self._handle_disconnect() + self._on_terminate(-1, repr(error_value)) def _handle_timeout(self): """Handle a socket timeout in read or write. 
We don't do anything in the non-blocking handlers because we only have the socket in a blocking state during connect.""" - pass + LOGGER.warning("Unexpected socket timeout") def _handle_events(self, fd, events, error=None, write_only=False): """Handle IO/Event loop events, processing them. :param int fd: The file descriptor for the events :param int events: Events from the IO/Event loop - :param int error: Was an error specified + :param int error: Was an error specified; TODO none of the current + adapters appear to be able to pass the `error` arg - is it needed? :param bool write_only: Only handle write events """ @@ -382,10 +344,11 @@ class BaseConnection(connection.Connection): self._handle_read() if (self.socket and write_only and (events & self.READ) and - (events & self.ERROR)): - LOGGER.error('BAD libc: Write-Only but Read+Error. ' + (events & self.ERROR)): + error_msg = ('BAD libc: Write-Only but Read+Error. ' 'Assume socket disconnected.') - self._handle_disconnect() + LOGGER.error(error_msg) + self._on_terminate(-1, error_msg) if self.socket and (events & self.ERROR): LOGGER.error('Error event %r, %r', events, error) @@ -427,7 +390,7 @@ class BaseConnection(connection.Connection): # Empty data, should disconnect if not data or data == 0: LOGGER.error('Read empty data, calling disconnect') - return self._handle_disconnect() + return self._on_terminate(-1, "EOF") # Pass the data into our top level frame dispatching method self._on_data_available(data) diff --git a/pika/adapters/blocking_connection.py b/pika/adapters/blocking_connection.py index 1877dd6..f6881cd 100644 --- a/pika/adapters/blocking_connection.py +++ b/pika/adapters/blocking_connection.py @@ -394,7 +394,7 @@ class BlockingConnection(object): # pylint: disable=R0902 returning true when it's time to stop processing. Their results are OR'ed together. 
""" - if self._impl.is_closed: + if self.is_closed: raise exceptions.ConnectionClosed() # Conditions for terminating the processing loop: @@ -404,38 +404,35 @@ class BlockingConnection(object): # pylint: disable=R0902 # OR # empty outbound buffer and any waiter is ready is_done = (lambda: - self._closed_result.ready or - (not self._impl.outbound_buffer and - (not waiters or any(ready() for ready in waiters)))) + self._closed_result.ready or + (not self._impl.outbound_buffer and + (not waiters or any(ready() for ready in waiters)))) # Process I/O until our completion condition is satisified while not is_done(): self._impl.ioloop.poll() self._impl.ioloop.process_timeouts() - if self._closed_result.ready: + if self._open_error_result.ready or self._closed_result.ready: try: - result = self._closed_result.value - if result.reason_code not in [0, 200]: - LOGGER.critical('Connection close detected; result=%r', - result) - raise exceptions.ConnectionClosed(result.reason_code, - result.reason_text) - elif not self._user_initiated_close: - # NOTE: unfortunately, upon socket error, on_close_callback - # presently passes reason_code=0, so we don't detect that as - # an error + if not self._user_initiated_close: if self._open_error_result.ready: maybe_exception = self._open_error_result.value.error - LOGGER.critical('Connection open failed - %r', - maybe_exception) + LOGGER.error('Connection open failed - %r', + maybe_exception) if isinstance(maybe_exception, Exception): raise maybe_exception - - LOGGER.critical('Connection close detected') - raise exceptions.ConnectionClosed() + else: + raise exceptions.ConnectionClosed(maybe_exception) + else: + result = self._closed_result.value + LOGGER.error('Connection close detected; result=%r', + result) + raise exceptions.ConnectionClosed(result.reason_code, + result.reason_text) else: - LOGGER.info('Connection closed; result=%r', result) + LOGGER.info('Connection closed; result=%r', + self._closed_result.value) finally: 
self._cleanup() @@ -732,7 +729,8 @@ class BlockingConnection(object): # pylint: disable=R0902 @property def is_closing(self): """ - Returns a boolean reporting the current connection state. + Returns True if connection is in the process of closing due to + client-initiated `close` request, but closing is not yet complete. """ return self._impl.is_closing @@ -1143,7 +1141,8 @@ class BlockingChannel(object): # pylint: disable=R0904,R0902 @property def is_closing(self): - """Returns True if the channel is closing. + """Returns True if client-initiated closing of the channel is in + progress. :rtype: bool @@ -1173,7 +1172,7 @@ class BlockingChannel(object): # pylint: disable=R0904,R0902 returning true when it's time to stop processing. Their results are OR'ed together. """ - if self._impl.is_closed: + if self.is_closed: raise exceptions.ChannelClosed() if not waiters: diff --git a/pika/adapters/libev_connection.py b/pika/adapters/libev_connection.py index ed3ec81..ce491c9 100644 --- a/pika/adapters/libev_connection.py +++ b/pika/adapters/libev_connection.py @@ -84,9 +84,10 @@ class LibevConnection(BaseConnection): :param pika.connection.Parameters parameters: Connection parameters :param on_open_callback: The method to call when the connection is open :type on_open_callback: method - :param on_open_error_callback: Method to call if the connection cannot - be opened - :type on_open_error_callback: method + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the default IOLoop in libev :param on_signal_callback: Method to call if SIGINT or SIGTERM occur diff --git a/pika/adapters/select_connection.py b/pika/adapters/select_connection.py index 
64e2bbe..645cb38 100644 --- a/pika/adapters/select_connection.py +++ b/pika/adapters/select_connection.py @@ -75,10 +75,10 @@ class SelectConnection(BaseConnection): :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Method to call on connection open - :param on_open_error_callback: Method to call if the connection cant - be opened - :type on_open_error_callback: method - :param method on_close_callback: Method to call on connection close + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the global IOLoop in Tornado :raises: RuntimeError diff --git a/pika/adapters/tornado_connection.py b/pika/adapters/tornado_connection.py index 1c5c607..ce407d1 100644 --- a/pika/adapters/tornado_connection.py +++ b/pika/adapters/tornado_connection.py @@ -39,9 +39,10 @@ class TornadoConnection(base_connection.BaseConnection): :param pika.connection.Parameters parameters: Connection parameters :param on_open_callback: The method to call when the connection is open :type on_open_callback: method - :param on_open_error_callback: Method to call if the connection cant - be opened - :type on_open_error_callback: method + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) :param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected :param custom_ioloop: Override using the global IOLoop in Tornado @@ -55,7 +56,7 @@ class TornadoConnection(base_connection.BaseConnection): def _adapter_connect(self): 
"""Connect to the remote socket, adding the socket to the IOLoop if - connected. + connected. :rtype: bool diff --git a/pika/adapters/twisted_connection.py b/pika/adapters/twisted_connection.py index 2ee65b2..62e595c 100644 --- a/pika/adapters/twisted_connection.py +++ b/pika/adapters/twisted_connection.py @@ -105,6 +105,9 @@ class TwistedChannel(object): try: consumer_tag = self.__channel.basic_consume(*args, **kwargs) + # TODO this except without types would suppress system-exiting + # exceptions, such as SystemExit and KeyboardInterrupt. It should be at + # least `except Exception` and preferably more specific. except: return defer.fail() @@ -163,6 +166,9 @@ class TwistedChannel(object): try: method(*args, **kwargs) + # TODO this except without types would suppress system-exiting + # exceptions, such as SystemExit and KeyboardInterrupt. It should be + # at least `except Exception` and preferably more specific. except: return defer.fail() return d @@ -300,13 +306,6 @@ class TwistedConnection(base_connection.BaseConnection): self.ioloop.remove_handler(None) self._cleanup_socket() - def _handle_disconnect(self): - """Do not stop the reactor, this would cause the entire process to exit, - just fire the disconnect callbacks - - """ - self._on_connection_closed(None, True) - def _on_connected(self): """Call superclass and then update the event state to flush the outgoing frame out. 
Commit 50d842526d9f12d32ad9f3c4910ef60b8c301f59 removed a @@ -339,7 +338,7 @@ class TwistedConnection(base_connection.BaseConnection): if not reason.check(error.ConnectionDone): log.err(reason) - self._handle_disconnect() + self._on_terminate(-1, str(reason)) def doRead(self): self._handle_read() diff --git a/pika/channel.py b/pika/channel.py index 5c67c49..4af9a6e 100644 --- a/pika/channel.py +++ b/pika/channel.py @@ -29,7 +29,7 @@ class Channel(object): CLOSED = 0 OPENING = 1 OPEN = 2 - CLOSING = 3 + CLOSING = 3 # client-initiated close in progress _ON_CHANNEL_CLEANUP_CB_KEY = '_on_channel_cleanup' @@ -615,7 +615,8 @@ class Channel(object): @property def is_closing(self): - """Returns True if the channel is closing. + """Returns True if client-initiated closing of the channel is in + progress. :rtype: bool diff --git a/pika/connection.py b/pika/connection.py index 6f59cd0..6288628 100644 --- a/pika/connection.py +++ b/pika/connection.py @@ -4,6 +4,7 @@ import sys import collections import logging import math +import numbers import platform import threading import warnings @@ -586,7 +587,7 @@ class Connection(object): CONNECTION_START = 3 CONNECTION_TUNE = 4 CONNECTION_OPEN = 5 - CONNECTION_CLOSING = 6 + CONNECTION_CLOSING = 6 # client-initiated close in progress def __init__(self, parameters=None, @@ -602,9 +603,10 @@ class Connection(object): :param pika.connection.Parameters parameters: Connection parameters :param method on_open_callback: Called when the connection is opened - :param method on_open_error_callback: Called if the connection cant - be opened - :param method on_close_callback: Called when the connection is closed + :param method on_open_error_callback: Called if the connection can't + be established: on_open_error_callback(connection, str|exception) + :param method on_close_callback: Called when the connection is closed: + on_close_callback(connection, reason_code, reason_text) """ self._write_lock = threading.Lock() @@ -770,8 +772,9 @@ class 
Connection(object): self.remaining_connection_attempts -= 1 LOGGER.warning('Could not connect, %i attempts left', self.remaining_connection_attempts) - if self.remaining_connection_attempts: + if self.remaining_connection_attempts > 0: LOGGER.info('Retrying in %i seconds', self.params.retry_delay) + # TODO: remove timeout if connection is closed before timer fires self.add_timeout(self.params.retry_delay, self.connect) else: self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self, @@ -813,7 +816,8 @@ class Connection(object): @property def is_closing(self): """ - Returns a boolean reporting the current connection state. + Returns True if connection is in the process of closing due to + client-initiated `close` request, but closing is not yet complete. """ return self.connection_state == self.CONNECTION_CLOSING @@ -1160,6 +1164,13 @@ class Connection(object): # Our starting point once connected, first frame received self._add_connection_start_callback() + # Add a callback handler for the Broker telling us to disconnect. + # NOTE: As of RabbitMQ 3.6.0, RabbitMQ broker may send Connection.Close + # to signal error during connection setup (and wait a longish time + # before closing the TCP/IP stream). Earlier RabbitMQ versions + # simply closed the TCP/IP stream. + self.callbacks.add(0, spec.Connection.Close, self._on_connection_close) + def _is_basic_deliver_frame(self, frame_value): """Returns true if the frame is a Basic.Deliver @@ -1169,17 +1180,6 @@ class Connection(object): """ return isinstance(frame_value, spec.Basic.Deliver) - def _is_connection_close_frame(self, value): - """Returns true if the frame is a Connection.Close frame. - - :param pika.frame.Method value: The frame to check - :rtype: bool - - """ - if not value: - return False - return isinstance(value.method, spec.Connection.Close) - def _is_method_frame(self, value): """Returns true if the frame is a method frame. 
@@ -1250,32 +1250,29 @@ class Connection(object): # Start the communication with the RabbitMQ Broker self._send_frame(frame.ProtocolHeader()) - def _on_connection_closed(self, method_frame, from_adapter=False): - """Called when the connection is closed remotely. The from_adapter value - will be true if the connection adapter has been disconnected from - the broker and the method was invoked directly instead of by receiving - a Connection.Close frame. + def _on_connection_close(self, method_frame): + """Called when the connection is closed remotely via Connection.Close + frame from broker. - :param pika.frame.Method: The Connection.Close frame - :param bool from_adapter: Called by the connection adapter + :param pika.frame.Method method_frame: The Connection.Close frame """ - if method_frame and self._is_connection_close_frame(method_frame): - self.closing = (method_frame.method.reply_code, - method_frame.method.reply_text) + LOGGER.debug('_on_connection_close: frame=%s', method_frame) - # Save the codes because self.closing gets reset by _adapter_disconnect - reply_code, reply_text = self.closing + self.closing = (method_frame.method.reply_code, + method_frame.method.reply_text) - # Stop the heartbeat checker if it exists - self._remove_heartbeat() + self._on_terminate(self.closing[0], self.closing[1]) - # If this did not come from the connection adapter, close the socket - if not from_adapter: - self._adapter_disconnect() + def _on_connection_close_ok(self, method_frame): + """Called when Connection.CloseOk is received from remote. - # Invoke a method frame neutral close - self._on_disconnect(reply_code, reply_text) + :param pika.frame.Method method_frame: The Connection.CloseOk frame + + """ + LOGGER.debug('_on_connection_close_ok: frame=%s', method_frame) + + self._on_terminate(self.closing[0], self.closing[1]) def _on_connection_error(self, connection_unused, error_message=None): """Default behavior when the connecting connection can not connect. 
@@ -1294,9 +1291,6 @@ class Connection(object): """ self.known_hosts = method_frame.method.known_hosts - # Add a callback handler for the Broker telling us to disconnect - self.callbacks.add(0, spec.Connection.Close, self._on_connection_closed) - # We're now connected at the AMQP level self._set_connection_state(self.CONNECTION_OPEN) @@ -1368,27 +1362,89 @@ class Connection(object): self._trim_frame_buffer(consumed_count) self._process_frame(frame_value) - def _on_disconnect(self, reply_code, reply_text): - """Invoke passing in the reply_code and reply_text from internal - methods to the adapter. Called from on_connection_closed and Heartbeat - timeouts. - - :param str reply_code: The numeric close code - :param str reply_text: The text close reason + def _on_terminate(self, reason_code, reason_text): + """Terminate the connection and notify registered ON_CONNECTION_ERROR + and/or ON_CONNECTION_CLOSED callbacks + :param integer reason_code: HTTP error code for AMQP-reported closures + or -1 for other errors (such as socket errors) + :param str reason_text: human-readable text message describing the error """ - LOGGER.warning('Disconnected from RabbitMQ at %s:%i (%s): %s', - self.params.host, self.params.port, reply_code, - reply_text) + LOGGER.warning( + 'Disconnected from RabbitMQ at %s:%i (%s): %s', + self.params.host, self.params.port, reason_code, + reason_text) + + if not isinstance(reason_code, numbers.Integral): + raise TypeError('reason_code must be an integer, but got %r' + % (reason_code,)) + + # Stop the heartbeat checker if it exists + self._remove_heartbeat() + + # Remove connection management callbacks + # TODO: This call was moved here verbatim from legacy code and the + # following doesn't seem to be right: `Connection.Open` here is + # unexpected, we don't appear to ever register it, and the broker + # shouldn't be sending `Connection.Open` to us, anyway. 
+ self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start, + spec.Connection.Open]) + + # Close the socket + self._adapter_disconnect() + + # Determine whether this was an error during connection setup + connection_error = None + + if self.connection_state == self.CONNECTION_PROTOCOL: + LOGGER.error('Incompatible Protocol Versions') + connection_error = exceptions.IncompatibleProtocolError( + reason_code, + reason_text) + elif self.connection_state == self.CONNECTION_START: + LOGGER.error('Connection closed while authenticating indicating a ' + 'probable authentication error') + connection_error = exceptions.ProbableAuthenticationError( + reason_code, + reason_text) + elif self.connection_state == self.CONNECTION_TUNE: + LOGGER.error('Connection closed while tuning the connection ' + 'indicating a probable permission error when ' + 'accessing a virtual host') + connection_error = exceptions.ProbableAccessDeniedError( + reason_code, + reason_text) + elif self.connection_state not in [self.CONNECTION_OPEN, + self.CONNECTION_CLOSED, + self.CONNECTION_CLOSING]: + LOGGER.warning('Unexpected connection state on disconnect: %i', + self.connection_state) + + # Transition to closed state self._set_connection_state(self.CONNECTION_CLOSED) + + # Inform our channel proxies for channel in dictkeys(self._channels): if channel not in self._channels: continue - method_frame = frame.Method(channel, spec.Channel.Close(reply_code, - reply_text)) + method_frame = frame.Method(channel, spec.Channel.Close( + reason_code, + reason_text)) self._channels[channel]._on_close(method_frame) - self._process_connection_closed_callbacks(reply_code, reply_text) - self._remove_connection_callbacks() + + # Inform interested parties + if connection_error is not None: + LOGGER.error('Connection setup failed due to %r', connection_error) + self.callbacks.process(0, + self.ON_CONNECTION_ERROR, + self, self, + connection_error) + + self.callbacks.process(0, self.ON_CONNECTION_CLOSED, 
self, self, + reason_code, reason_text) + + # Reset connection properties + self._init_connection_state() def _process_callbacks(self, frame_value): """Process the callbacks for the frame if the frame is a method frame @@ -1407,17 +1463,6 @@ class Connection(object): return True return False - def _process_connection_closed_callbacks(self, reason_code, reason_text): - """Process any callbacks that should be called when the connection is - closed. - - :param str reason_code: The numeric code from RabbitMQ for the close - :param str reason_text: The text reason fro closing - - """ - self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self, - reason_code, reason_text) - def _process_frame(self, frame_value): """Process an inbound frame from the socket. @@ -1489,11 +1534,6 @@ class Connection(object): for method_frame in method_frames: self._remove_callback(channel_number, method_frame) - def _remove_connection_callbacks(self): - """Remove all callbacks for the connection""" - self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start, - spec.Connection.Open]) - def _rpc(self, channel_number, method_frame, callback_method=None, acceptable_replies=None): @@ -1530,7 +1570,7 @@ class Connection(object): """ self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0), - self._on_connection_closed, [spec.Connection.CloseOk]) + self._on_connection_close_ok, [spec.Connection.CloseOk]) def _send_connection_open(self): """Send a Connection.Open frame""" diff --git a/pika/exceptions.py b/pika/exceptions.py index c56f6a0..f219fbb 100644 --- a/pika/exceptions.py +++ b/pika/exceptions.py @@ -26,7 +26,8 @@ class AMQPConnectionError(AMQPError): class IncompatibleProtocolError(AMQPConnectionError): def __repr__(self): - return 'The protocol returned by the server is not supported' + return ('The protocol returned by the server is not supported: %s' % + (self.args,)) class AuthenticationError(AMQPConnectionError): @@ -40,14 +41,15 @@ class 
ProbableAuthenticationError(AMQPConnectionError): def __repr__(self): return ('Client was disconnected at a connection stage indicating a ' - 'probable authentication error') + 'probable authentication error: %s' % (self.args,)) class ProbableAccessDeniedError(AMQPConnectionError): def __repr__(self): return ('Client was disconnected at a connection stage indicating a ' - 'probable denial of access to the specified virtual host') + 'probable denial of access to the specified virtual host: %s' % + (self.args,)) class NoFreeChannels(AMQPConnectionError): diff --git a/pika/heartbeat.py b/pika/heartbeat.py index bbd5c1f..64026cc 100644 --- a/pika/heartbeat.py +++ b/pika/heartbeat.py @@ -115,10 +115,14 @@ class HeartbeatChecker(object): self._idle_byte_intervals) duration = self._max_idle_count * self._interval text = HeartbeatChecker._STALE_CONNECTION % duration + + # NOTE: this won't achieve the perceived effect of sending + # Connection.Close to broker, because the frame will only get buffered + # in memory before the next statement terminates the connection. self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text) - self._connection._adapter_disconnect() - self._connection._on_disconnect(HeartbeatChecker._CONNECTION_FORCED, - text) + + self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED, + text) @property def _has_received_data(self):
Pika drops Connection.Close from broker before Connection.Open-Ok When running in Fedora with RabbitMQ 3.6.0, I noticed that the test async_adapter_tests.TestZ_AccessDenied times out. This test attempts to open a connection using a non-existent vhost. In this scenario, RabbitMQ eventually closes the socket connection (about 30 seconds after Connection.Open with the non-existent vhost from the client). It turns out that RabbitMQ 3.6.0 sends a Connection.Close immediately after receiving Connection.Open with bad vhost from the client. However, there is a bug in pika: pika doesn't register to handle Connection.Close from the broker until it receives Connection.Open-Ok, so pika just drops the Connection.Close frame in this scenario. Pika eventually detects that the connection is closed, subject to whenever RabbitMQ decides to close the connection. pika needs to register its handler for Connection.Close before initiating the connection. `Connection._init_connection_state` might be a good place for it, right next to the call `self._add_connection_start_callback()`.
pika/pika
diff --git a/tests/unit/blocking_connection_tests.py b/tests/unit/blocking_connection_tests.py index 70dc065..2888525 100644 --- a/tests/unit/blocking_connection_tests.py +++ b/tests/unit/blocking_connection_tests.py @@ -3,6 +3,10 @@ Tests for pika.adapters.blocking_connection.BlockingConnection """ + +# Suppress pylint warnings concering access to protected member +# pylint: disable=W0212 + import socket from pika.exceptions import AMQPConnectionError @@ -26,11 +30,11 @@ class BlockingConnectionMockTemplate(blocking_connection.BlockingConnection): pass class SelectConnectionTemplate(blocking_connection.SelectConnection): - is_closed = False - is_closing = False - is_open = True - outbound_buffer = [] - _channels = dict() + is_closed = None + is_closing = None + is_open = None + outbound_buffer = None + _channels = None class BlockingConnectionTests(unittest.TestCase): @@ -41,7 +45,7 @@ class BlockingConnectionTests(unittest.TestCase): def test_constructor(self, select_connection_class_mock): with mock.patch.object(blocking_connection.BlockingConnection, '_process_io_for_connection_setup'): - connection = blocking_connection.BlockingConnection('params') + blocking_connection.BlockingConnection('params') select_connection_class_mock.assert_called_once_with( parameters='params', @@ -153,9 +157,7 @@ class BlockingConnectionTests(unittest.TestCase): with self.assertRaises(pika.exceptions.ConnectionClosed) as cm: connection._flush_output(lambda: False, lambda: True) - self.assertSequenceEqual( - cm.exception.args, - ()) + self.assertSequenceEqual(cm.exception.args, (200, 'ok')) @patch.object(blocking_connection, 'SelectConnection', spec_set=SelectConnectionTemplate) @@ -190,7 +192,7 @@ class BlockingConnectionTests(unittest.TestCase): blocking_connection.BlockingConnection, '_flush_output', spec_set=blocking_connection.BlockingConnection._flush_output): - channel = connection.channel() + connection.channel() @patch.object(blocking_connection, 'SelectConnection', 
spec_set=SelectConnectionTemplate) diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py index 7ef7afe..ed8b038 100644 --- a/tests/unit/connection_tests.py +++ b/tests/unit/connection_tests.py @@ -2,6 +2,18 @@ Tests for pika.connection.Connection """ + +# Suppress pylint warnings concerning access to protected member +# pylint: disable=W0212 + +# Suppress pylint messages concerning missing docstrings +# pylint: disable=C0111 + +# Suppress pylint messages concerning invalid method name +# pylint: disable=C0103 + + + try: import mock except ImportError: @@ -17,6 +29,7 @@ except ImportError: from pika import connection from pika import channel from pika import credentials +from pika import exceptions from pika import frame from pika import spec from pika.compat import xrange, urlencode @@ -97,15 +110,128 @@ class ConnectionTests(unittest.TestCase): self.assertFalse(on_close_ready.called, '_on_close_ready should not have been called') - def test_on_disconnect(self): - """if connection isn't closing _on_close_ready should not be called""" - self.connection._on_disconnect(0, 'Undefined') + def test_on_terminate_cleans_up(self): + """_on_terminate cleans up heartbeat, adapter, and channels""" + heartbeat = mock.Mock() + self.connection.heartbeat = heartbeat + self.connection._adapter_disconnect = mock.Mock() + + self.connection._on_terminate(0, 'Undefined') + + heartbeat.stop.assert_called_once_with() + self.connection._adapter_disconnect.assert_called_once_with() + self.assertTrue(self.channel._on_close.called, 'channel._on_close should have been called') method_frame = self.channel._on_close.call_args[0][0] self.assertEqual(method_frame.method.reply_code, 0) self.assertEqual(method_frame.method.reply_text, 'Undefined') + self.assertTrue(self.connection.is_closed) + + def test_on_terminate_invokes_connection_closed_callback(self): + """_on_terminate invokes `Connection.ON_CONNECTION_CLOSED` callbacks""" + self.connection.callbacks.process = 
mock.Mock( + wraps=self.connection.callbacks.process) + + self.connection._adapter_disconnect = mock.Mock() + + self.connection._on_terminate(1, 'error text') + + self.connection.callbacks.process.assert_called_once_with( + 0, self.connection.ON_CONNECTION_CLOSED, + self.connection, self.connection, + 1, 'error text') + + with self.assertRaises(AssertionError): + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_ERROR, + self.connection, self.connection, + mock.ANY) + + def test_on_terminate_invokes_protocol_on_connection_error_and_closed(self): + """_on_terminate invokes `ON_CONNECTION_ERROR` with `IncompatibleProtocolError` and `ON_CONNECTION_CLOSED` callbacks""" + with mock.patch.object(self.connection.callbacks, 'process'): + + self.connection._adapter_disconnect = mock.Mock() + + self.connection._set_connection_state( + self.connection.CONNECTION_PROTOCOL) + + self.connection._on_terminate(1, 'error text') + + self.assertEqual(self.connection.callbacks.process.call_count, 2) + + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_ERROR, + self.connection, self.connection, + mock.ANY) + + conn_exc = self.connection.callbacks.process.call_args_list[0][0][4] + self.assertIs(type(conn_exc), exceptions.IncompatibleProtocolError) + self.assertSequenceEqual(conn_exc.args, [1, 'error text']) + + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_CLOSED, + self.connection, self.connection, + 1, 'error text') + + def test_on_terminate_invokes_auth_on_connection_error_and_closed(self): + """_on_terminate invokes `ON_CONNECTION_ERROR` with `ProbableAuthenticationError` and `ON_CONNECTION_CLOSED` callbacks""" + with mock.patch.object(self.connection.callbacks, 'process'): + + self.connection._adapter_disconnect = mock.Mock() + + self.connection._set_connection_state( + self.connection.CONNECTION_START) + + self.connection._on_terminate(1, 'error text') + + 
self.assertEqual(self.connection.callbacks.process.call_count, 2) + + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_ERROR, + self.connection, self.connection, + mock.ANY) + + conn_exc = self.connection.callbacks.process.call_args_list[0][0][4] + self.assertIs(type(conn_exc), + exceptions.ProbableAuthenticationError) + self.assertSequenceEqual(conn_exc.args, [1, 'error text']) + + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_CLOSED, + self.connection, self.connection, + 1, 'error text') + + def test_on_terminate_invokes_access_denied_on_connection_error_and_closed( + self): + """_on_terminate invokes `ON_CONNECTION_ERROR` with `ProbableAccessDeniedError` and `ON_CONNECTION_CLOSED` callbacks""" + with mock.patch.object(self.connection.callbacks, 'process'): + + self.connection._adapter_disconnect = mock.Mock() + + self.connection._set_connection_state( + self.connection.CONNECTION_TUNE) + + self.connection._on_terminate(1, 'error text') + + self.assertEqual(self.connection.callbacks.process.call_count, 2) + + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_ERROR, + self.connection, self.connection, + mock.ANY) + + conn_exc = self.connection.callbacks.process.call_args_list[0][0][4] + self.assertIs(type(conn_exc), exceptions.ProbableAccessDeniedError) + self.assertSequenceEqual(conn_exc.args, [1, 'error text']) + + self.connection.callbacks.process.assert_any_call( + 0, self.connection.ON_CONNECTION_CLOSED, + self.connection, self.connection, + 1, 'error text') + @mock.patch('pika.connection.Connection.connect') def test_new_conn_should_use_first_channel(self, connect): """_next_channel_number in new conn should always be 1""" @@ -124,12 +250,12 @@ class ConnectionTests(unittest.TestCase): """make sure the callback adding works""" self.connection.callbacks = mock.Mock(spec=self.connection.callbacks) for test_method, expected_key in ( - 
(self.connection.add_backpressure_callback, - self.connection.ON_CONNECTION_BACKPRESSURE), - (self.connection.add_on_open_callback, - self.connection.ON_CONNECTION_OPEN), - (self.connection.add_on_close_callback, - self.connection.ON_CONNECTION_CLOSED)): + (self.connection.add_backpressure_callback, + self.connection.ON_CONNECTION_BACKPRESSURE), + (self.connection.add_on_open_callback, + self.connection.ON_CONNECTION_OPEN), + (self.connection.add_on_close_callback, + self.connection.ON_CONNECTION_CLOSED)): self.connection.callbacks.reset_mock() test_method(callback_method) self.connection.callbacks.add.assert_called_once_with( @@ -234,12 +360,13 @@ class ConnectionTests(unittest.TestCase): } #Test Type Errors for bad_field, bad_value in ( - ('host', 15672), ('port', '5672'), ('virtual_host', True), - ('channel_max', '4'), ('frame_max', '5'), ('credentials', 'bad'), - ('locale', 1), ('heartbeat_interval', '6'), - ('socket_timeout', '42'), ('retry_delay', 'two'), - ('backpressure_detection', 'true'), ('ssl', {'ssl': 'dict'}), - ('ssl_options', True), ('connection_attempts', 'hello')): + ('host', 15672), ('port', '5672'), ('virtual_host', True), + ('channel_max', '4'), ('frame_max', '5'), + ('credentials', 'bad'), ('locale', 1), + ('heartbeat_interval', '6'), ('socket_timeout', '42'), + ('retry_delay', 'two'), ('backpressure_detection', 'true'), + ('ssl', {'ssl': 'dict'}), ('ssl_options', True), + ('connection_attempts', 'hello')): bkwargs = copy.deepcopy(kwargs) bkwargs[bad_field] = bad_value self.assertRaises(TypeError, connection.ConnectionParameters, @@ -371,20 +498,28 @@ class ConnectionTests(unittest.TestCase): self.assertEqual(['ab'], list(self.connection.outbound_buffer)) self.assertEqual('hearbeat obj', self.connection.heartbeat) - def test_on_connection_closed(self): - """make sure connection close sends correct frames""" + def test_on_connection_close(self): + """make sure _on_connection_close terminates connection""" method_frame = mock.Mock() 
method_frame.method = mock.Mock(spec=spec.Connection.Close) method_frame.method.reply_code = 1 method_frame.method.reply_text = 'hello' - heartbeat = mock.Mock() - self.connection.heartbeat = heartbeat - self.connection._adapter_disconnect = mock.Mock() - self.connection._on_connection_closed(method_frame, from_adapter=False) + self.connection._on_terminate = mock.Mock() + self.connection._on_connection_close(method_frame) #Check - self.assertTupleEqual((1, 'hello'), self.connection.closing) - heartbeat.stop.assert_called_once_with() - self.connection._adapter_disconnect.assert_called_once_with() + self.connection._on_terminate.assert_called_once_with(1, 'hello') + + def test_on_connection_close_ok(self): + """make sure _on_connection_close_ok terminates connection""" + method_frame = mock.Mock() + method_frame.method = mock.Mock(spec=spec.Connection.CloseOk) + self.connection.closing = (1, 'bye') + self.connection._on_terminate = mock.Mock() + + self.connection._on_connection_close_ok(method_frame) + + #Check + self.connection._on_terminate.assert_called_once_with(1, 'bye') @mock.patch('pika.frame.decode_frame') def test_on_data_available(self, decode_frame): diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py index 62aa777..4149eef 100644 --- a/tests/unit/heartbeat_tests.py +++ b/tests/unit/heartbeat_tests.py @@ -2,6 +2,16 @@ Tests for pika.heartbeat """ + +# Suppress pylint warnings concering access to protected member +# pylint: disable=W0212 + +# Suppress pylint messages concering missing docstring +# pylint: disable=C0111 + +# Suppress pylint messages concering invalid method name +# pylint: disable=C0103 + try: import mock except ImportError: @@ -58,7 +68,7 @@ class HeartbeatTests(unittest.TestCase): @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer') def test_constructor_called_setup_timer(self, timer): - obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) + heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL) 
timer.assert_called_once_with() def test_active_true(self): @@ -135,9 +145,8 @@ class HeartbeatTests(unittest.TestCase): self.obj._interval) self.mock_conn.close.assert_called_once_with( self.obj._CONNECTION_FORCED, reason) - self.mock_conn._on_disconnect.assert_called_once_with( + self.mock_conn._on_terminate.assert_called_once_with( self.obj._CONNECTION_FORCED, reason) - self.mock_conn._adapter_disconnect.assert_called_once_with() def test_has_received_data_false(self): self.obj._bytes_received = 100
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 11 }
0.10
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "coverage", "codecov", "mock", "nose", "tornado", "twisted", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y libev-dev" ], "python": "3.5", "reqs_path": [ "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 codecov==2.1.13 coverage==6.2 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 mock==5.2.0 nose==1.3.7 packaging==21.3 -e git+https://github.com/pika/pika.git@8be81a21d8b554ee9af4fae08907956e5b8b138f#egg=pika pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 tomli==1.2.3 tornado==6.1 Twisted==15.3.0 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0 zope.interface==5.5.2
name: pika channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - codecov==2.1.13 - coverage==6.2 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mock==5.2.0 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - tomli==1.2.3 - tornado==6.1 - twisted==15.3.0 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 - zope-interface==5.5.2 prefix: /opt/conda/envs/pika
[ "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_no_error_close", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_ok", "tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_cleans_up", "tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_access_denied_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_auth_on_connection_error_and_closed", "tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_connection_closed_callback", "tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_protocol_on_connection_error_and_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close" ]
[ "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_connection_attempts_with_timeout", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called" ]
[ "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_channel", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_close", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_constructor", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_error_close", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_user_initiated_close", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup_fails_with_open_error", "tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_sleep", "tests/unit/connection_tests.py::ConnectionTests::test_add_callbacks", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_close_callback", "tests/unit/connection_tests.py::ConnectionTests::test_add_on_open_error_callback", "tests/unit/connection_tests.py::ConnectionTests::test_bad_type_connection_parameters", "tests/unit/connection_tests.py::ConnectionTests::test_channel", "tests/unit/connection_tests.py::ConnectionTests::test_client_properties", "tests/unit/connection_tests.py::ConnectionTests::test_close_channels", "tests/unit/connection_tests.py::ConnectionTests::test_close_closes_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_close_ignores_closed_channels", "tests/unit/connection_tests.py::ConnectionTests::test_connect", "tests/unit/connection_tests.py::ConnectionTests::test_connect_reconnect", "tests/unit/connection_tests.py::ConnectionTests::test_good_connection_parameters", "tests/unit/connection_tests.py::ConnectionTests::test_new_conn_should_use_first_channel", "tests/unit/connection_tests.py::ConnectionTests::test_next_channel_number_returns_lowest_unused", 
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_no_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_non_closing_state", "tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_close_ready_no_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_close_ready_open_channels", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_start", "tests/unit/connection_tests.py::ConnectionTests::test_on_connection_tune", "tests/unit/connection_tests.py::ConnectionTests::test_on_data_available", "tests/unit/connection_tests.py::ConnectionTests::test_process_url", "tests/unit/connection_tests.py::ConnectionTests::test_set_backpressure_multiplier", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals", 
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received", "tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent" ]
[]
BSD 3-Clause "New" or "Revised" License
366
joke2k__faker-318
807bf01588fd5dd9f680d69d1c6ddd13c255136f
2016-01-04 15:33:52
883576c2d718ad7f604415e02a898f1f917d5b86
diff --git a/README.rst b/README.rst index 4dc04a86..0941dbda 100644 --- a/README.rst +++ b/README.rst @@ -263,13 +263,26 @@ How to use with factory-boy title = factory.LazyAttribute(lambda x: faker.sentence(nb_words=4)) author_name = factory.LazyAttribute(lambda x: faker.name()) +Accessing the `random` instance +------------------------------- + +The ``.random`` property on the generator returns the instance of ``random.Random`` +used to generate the values: + +__ code:: python + + from faker import Faker + fake = Faker() + fake.random + fake.random.getstate() + Seeding the Generator --------------------- When using Faker for unit testing, you will often want to generate the same -data set. The generator offers a ``seed()`` method, which seeds the random -number generator. Calling the same script twice with the same seed produces the -same results. +data set. For convenience, the generator also provide a ``seed()`` method, which +seeds the random number generator. Calling the same script twice with the same +seed produces the same results. .. code:: python @@ -280,8 +293,20 @@ same results. print fake.name() > Margaret Boehm +The code above is equivalent to the following: + +.. code:: python + + from faker import Faker + fake = Faker() + faker.random.seed(4321) + + print fake.name() + > Margaret Boehm + Tests ----- + Installing dependencies: .. 
code:: bash diff --git a/docs/index.rst b/docs/index.rst index 601d474d..7e96203c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -264,13 +264,26 @@ How to use with factory-boy title = factory.LazyAttribute(lambda x: faker.sentence(nb_words=4)) author_name = factory.LazyAttribute(lambda x: faker.name()) +Accessing the `random` instance +------------------------------- + +The ``.random`` property on the generator returns the instance of ``random.Random`` +used to generate the values: + +__ code:: python + + from faker import Faker + fake = Faker() + fake.random + fake.random.getstate() + Seeding the Generator --------------------- When using Faker for unit testing, you will often want to generate the same -data set. The generator offers a ``seed()`` method, which seeds the random -number generator. Calling the same script twice with the same seed produces the -same results. +data set. For convenience, the generator also provide a ``seed()`` method, which +seeds the random number generator. Calling the same script twice with the same +seed produces the same results. .. code:: python @@ -281,6 +294,17 @@ same results. print fake.name() > Margaret Boehm +The code above is equivalent to the following: + +.. code:: python + + from faker import Faker + fake = Faker() + faker.random.seed(4321) + + print fake.name() + > Margaret Boehm + Tests ----- diff --git a/faker/generator.py b/faker/generator.py index 95dfac2a..74034cb4 100644 --- a/faker/generator.py +++ b/faker/generator.py @@ -50,6 +50,10 @@ class Generator(object): """Returns added providers.""" return self.providers + @property + def random(self): + return random + def seed(self, seed=None): """Calls random.seed""" random.seed(seed)
Access to the Generator.random It would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in the seed, but one currently has no way of gathering what the seed/state is if using the automatically generated seed. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail. That is, I'd like to be able to do something like: `faker.generator.getstate()` (which gets the random state w/o exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable) For now, the workaround appears to be to create a Faker object with your own Generator.
joke2k/faker
diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py index 6502a448..5dc22528 100644 --- a/faker/tests/__init__.py +++ b/faker/tests/__init__.py @@ -518,6 +518,12 @@ class GeneratorTestCase(unittest.TestCase): def setUp(self): self.generator = Generator() + @patch('random.getstate') + def test_get_random(self, mock_system_random): + random_instance = self.generator.random + random_instance.getstate() + self.assertFalse(mock_system_random.called) + @patch('random.seed') def test_random_seed_doesnt_seed_system_random(self, mock_system_random): self.generator.seed(0)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup==1.2.2 -e git+https://github.com/joke2k/faker.git@807bf01588fd5dd9f680d69d1c6ddd13c255136f#egg=fake_factory iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.2.1
name: faker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/faker
[ "faker/tests/__init__.py::GeneratorTestCase::test_get_random" ]
[]
[ "faker/tests/__init__.py::ShimsTestCase::test_counter", "faker/tests/__init__.py::UtilsTestCase::test_add_dicts", "faker/tests/__init__.py::UtilsTestCase::test_choice_distribution", "faker/tests/__init__.py::UtilsTestCase::test_find_available_locales", "faker/tests/__init__.py::UtilsTestCase::test_find_available_providers", "faker/tests/__init__.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider", "faker/tests/__init__.py::FactoryTestCase::test_command", "faker/tests/__init__.py::FactoryTestCase::test_command_custom_provider", "faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates", "faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates_with_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period", "faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period_with_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_datetime_safe", "faker/tests/__init__.py::FactoryTestCase::test_datetimes_with_and_without_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_documentor", "faker/tests/__init__.py::FactoryTestCase::test_format_calls_formatter_on_provider", "faker/tests/__init__.py::FactoryTestCase::test_format_transfers_arguments_to_formatter", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_callable", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_correct_formatter", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter", "faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format", "faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format_with_arguments", "faker/tests/__init__.py::FactoryTestCase::test_no_words_paragraph", "faker/tests/__init__.py::FactoryTestCase::test_no_words_sentence", "faker/tests/__init__.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces", 
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters", "faker/tests/__init__.py::FactoryTestCase::test_password", "faker/tests/__init__.py::FactoryTestCase::test_prefix_suffix_always_string", "faker/tests/__init__.py::FactoryTestCase::test_random_element", "faker/tests/__init__.py::FactoryTestCase::test_slugify", "faker/tests/__init__.py::FactoryTestCase::test_timezone_conversion", "faker/tests/__init__.py::FactoryTestCase::test_us_ssn_valid", "faker/tests/__init__.py::GeneratorTestCase::test_random_seed_doesnt_seed_system_random" ]
[]
MIT License
367
ntoll__uflash-9
28bc481b67d67cc20aacc1191c87ac1e4c59bb34
2016-01-04 21:42:12
28bc481b67d67cc20aacc1191c87ac1e4c59bb34
ntoll: I'll try to get this reviewed and merged this evening. funkyHat: I've pushed another branch which fixes the test coverage (although one of the new tests is a bit of a beast...) Also removed `uflash help` as mentioned above: https://github.com/funkyHat/uflash/tree/unhexlify-plus ntoll: Can you merge the new branch into this one and I can do a final review..? funkyHat: Done. Want me to squash it into a single commit? ntoll: Please do... :-)
diff --git a/uflash.py b/uflash.py index 61fe83d..f5cd8a6 100644 --- a/uflash.py +++ b/uflash.py @@ -3,6 +3,7 @@ This module contains functions for turning a Python script into a .hex file and flashing it onto a BBC micro:bit. """ +import argparse import sys import os import struct @@ -19,8 +20,6 @@ _SCRIPT_ADDR = 0x3e000 _HELP_TEXT = """ Flash Python onto the BBC micro:bit -Usage: uflash [path_to_script.py] [path_to_microbit] - If no path to the micro:bit is provided uflash will attempt to autodetect the correct path to the device. If no path to the Python script is provided uflash will flash the unmodified MicroPython firmware onto the device. @@ -72,6 +71,23 @@ def hexlify(script): return '\n'.join(output) +def unhexlify(blob): + """ + Takes a hexlified script and turns it back into Python code. + """ + lines = blob.split('\n')[1:] + output = [] + for line in lines: + # Discard the address, length etc. and reverse the hexlification + output.append(binascii.unhexlify(line[9:-2])) + # Strip off "MP<size>" from the start + output[0] = output[0][4:] + # and strip any null bytes from the end + output[-1] = output[-1].strip(b'\x00') + script = b''.join(output) + return script + + def embed_hex(runtime_hex, python_hex=None): """ Given a string representing the MicroPython runtime hex, will embed a @@ -98,6 +114,28 @@ def embed_hex(runtime_hex, python_hex=None): return '\n'.join(embedded_list) + '\n' +def extract_script(embedded_hex): + """ + Given a hex file containing the MicroPython runtime and an embedded Python + script, will extract the original script. + + Returns a string containing the original embedded script. 
+ """ + hex_lines = embedded_hex.split('\n') + # Find the marker in the hex that comes just before the script + try: + start_line = hex_lines.index(':08058000193901005D150000AE') + 1 + except ValueError as e: + raise ValueError('Bad input hex file:', e) + # Recombine the lines after that, but leave out the last 3 lines + blob = '\n'.join(hex_lines[start_line:-3]) + if blob == '': + # If the result is the empty string, there was no embedded script + return b'' + # Pass the extracted hex through unhexlify + return unhexlify(blob) + + def find_microbit(): """ Returns a path on the filesystem that represents the plugged in BBC @@ -179,6 +217,8 @@ def flash(path_to_python=None, path_to_microbit=None): # Grab the Python script (if needed). python_hex = '' if path_to_python: + if not path_to_python.endswith('.py'): + raise ValueError('Python files must end in ".py".') with open(path_to_python, 'rb') as python_script: python_hex = hexlify(python_script.read()) # Generate the resulting hex file. @@ -195,6 +235,20 @@ def flash(path_to_python=None, path_to_microbit=None): raise IOError('Unable to find micro:bit. Is it plugged in?') +def extract(path_to_hex=None, output_path=None): + """ + Given a hex file this function will attempt to extract the embedded script + from it and save it either to output_path or stdout + """ + with open(path_to_hex, 'r') as hex_file: + python_script = extract_script(hex_file.read()) + if output_path is not None: + with open(output_path, 'w') as output_file: + output_file.write(python_script) + else: + print(python_script.decode('utf-8')) + + def main(argv=None): """ Entry point for the command line tool 'uflash'. 
@@ -210,20 +264,21 @@ def main(argv=None): """ if not argv: argv = sys.argv[1:] - arg_len = len(argv) try: - if arg_len == 0: - flash() - elif arg_len >= 1: - if argv[0] == 'help': - print(_HELP_TEXT) - return - if not argv[0].lower().endswith('.py'): - raise ValueError('Python files must end in ".py".') - if arg_len == 1: - flash(argv[0]) - elif arg_len > 1: - flash(argv[0], argv[1]) + parser = argparse.ArgumentParser(description=_HELP_TEXT) + parser.add_argument('source', nargs='?', default=None) + parser.add_argument('target', nargs='?', default=None) + parser.add_argument('-e', '--extract', + action='store_true', + help="""Extract python source from a hex file + instead of creating the hex file""", + ) + args = parser.parse_args(argv) + + if args.extract: + extract(args.source, args.target) + else: + flash(args.source, args.target) except Exception as ex: # The exception of no return. Print the exception information. print(ex)
Add ability to extract Python code from a .hex file. Because sometimes, you don't save the source file... ;-)
ntoll/uflash
diff --git a/tests/test_uflash.py b/tests/test_uflash.py index 5434084..eb0c87c 100644 --- a/tests/test_uflash.py +++ b/tests/test_uflash.py @@ -39,6 +39,15 @@ def test_hexlify(): assert len(lines) == 5 +def test_unhexlify(): + """ + Ensure that we can get the script back out using unhexlify + """ + hexlified = uflash.hexlify(TEST_SCRIPT) + unhexlified = uflash.unhexlify(hexlified) + assert unhexlified == TEST_SCRIPT + + def test_hexlify_empty_script(): """ The function returns an empty string if the script is empty. @@ -84,6 +93,32 @@ def test_embed_no_runtime(): assert ex.value.args[0] == 'MicroPython runtime hex required.' +def test_extract(): + """ + The script should be returned if there is one + """ + python = uflash.hexlify(TEST_SCRIPT) + result = uflash.embed_hex(uflash._RUNTIME, python) + extracted = uflash.extract_script(result) + assert extracted == TEST_SCRIPT + + +def test_extract_not_valid_hex(): + """ + Return a sensible message if the hex file isn't valid + """ + with pytest.raises(ValueError) as e: + uflash.extract_script('invalid input') + assert 'Bad input hex file' in e.value.args[0] + + +def test_extract_no_python(): + """ + What to do here? + """ + assert uflash.extract_script(uflash._RUNTIME) == b'' + + def test_find_microbit_posix_exists(): """ Simulate being on os.name == 'posix' and a call to "mount" returns a @@ -278,8 +313,7 @@ def test_main_no_args(): with mock.patch('sys.argv', ['uflash', ]): with mock.patch('uflash.flash') as mock_flash: uflash.main() - assert mock_flash.call_count == 1 - assert mock_flash.call_args == () + assert mock_flash.called_once_with(None, None) def test_main_first_arg_python(): @@ -322,11 +356,55 @@ def test_main_two_args(): assert mock_flash.called_once_with('foo.py', '/media/foo/bar') -def test_main_extra_args_ignored(): +def test_extract_command(): """ - Any arguments more than two are ignored, with only the first two passed - into the flash() function. 
+ Test the command-line script extract feature """ - with mock.patch('uflash.flash') as mock_flash: - uflash.main(argv=['foo.py', '/media/foo/bar', 'baz', 'quux']) - assert mock_flash.called_once_with('foo.py', '/media/foo/bar') + with mock.patch('uflash.extract') as mock_extract: + uflash.main(argv=['-e', 'hex.hex', 'foo.py']) + assert mock_extract.called_once_with('hex.hex', 'foo.py') + + +def test_extract_paths(): + """ + Test the different paths of the extract() function. + It should open and extract the contents of the file (input arg) + When called with only an input it should print the output of extract_script + When called with two arguments it should write the output to the output arg + """ + mock_e = mock.MagicMock(return_value=mock.sentinel.script) + mock_o = mock.MagicMock() + mock_o.return_value.__enter__ = lambda s: s + mock_o.return_value.__exit__ = mock.Mock() + mock_o.return_value.read.return_value = 'script' + mock_o.return_value.write = mock.Mock() + + with mock.patch('uflash.extract_script', mock_e) as mock_extract_script, \ + mock.patch('builtins.print') as mock_print, \ + mock.patch('builtins.open', mock_o) as mock_open: + uflash.extract('foo.hex') + assert mock_open.called_once_with('foo.hex') + assert mock_extract_script.called_once_with(mock.sentinel.file_handle) + assert mock_print.called_once_with(mock.sentinel.script) + + uflash.extract('foo.hex', 'out.py') + assert mock_open.call_count == 3 + assert mock_open.called_with('out.py', 'w') + assert mock_open.return_value.write.call_count == 1 + + +def test_extract_command_source_only(): + """ + If there is no target file the extract command should write to stdout + """ + with mock.patch('uflash.extract') as mock_extract: + uflash.main(argv=['hex.hex']) + assert mock_extract.called_once_with('hex.hex') + + +def test_extract_command_no_source(): + """ + If there is no source file the extract command should complain + """ + with pytest.raises(TypeError): + uflash.extract(None, None)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
alabaster==0.7.16 babel==2.17.0 certifi==2025.1.31 charset-normalizer==3.4.1 coverage==7.8.0 docutils==0.21.2 exceptiongroup==1.2.2 idna==3.10 imagesize==1.4.1 importlib_metadata==8.6.1 iniconfig==2.1.0 Jinja2==3.1.6 MarkupSafe==3.0.2 packaging==24.2 pep8==1.7.1 pluggy==1.5.0 pyflakes==3.3.1 Pygments==2.19.1 pytest==8.3.5 pytest-cov==6.0.0 requests==2.32.3 snowballstemmer==2.2.0 Sphinx==7.4.7 sphinxcontrib-applehelp==2.0.0 sphinxcontrib-devhelp==2.0.0 sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==2.0.0 sphinxcontrib-serializinghtml==2.0.0 tomli==2.2.1 -e git+https://github.com/ntoll/uflash.git@28bc481b67d67cc20aacc1191c87ac1e4c59bb34#egg=uflash urllib3==2.3.0 zipp==3.21.0
name: uflash channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - alabaster==0.7.16 - babel==2.17.0 - certifi==2025.1.31 - charset-normalizer==3.4.1 - coverage==7.8.0 - docutils==0.21.2 - exceptiongroup==1.2.2 - idna==3.10 - imagesize==1.4.1 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - jinja2==3.1.6 - markupsafe==3.0.2 - packaging==24.2 - pep8==1.7.1 - pluggy==1.5.0 - pyflakes==3.3.1 - pygments==2.19.1 - pytest==8.3.5 - pytest-cov==6.0.0 - requests==2.32.3 - snowballstemmer==2.2.0 - sphinx==7.4.7 - sphinxcontrib-applehelp==2.0.0 - sphinxcontrib-devhelp==2.0.0 - sphinxcontrib-htmlhelp==2.1.0 - sphinxcontrib-jsmath==1.0.1 - sphinxcontrib-qthelp==2.0.0 - sphinxcontrib-serializinghtml==2.0.0 - tomli==2.2.1 - urllib3==2.3.0 - zipp==3.21.0 prefix: /opt/conda/envs/uflash
[ "tests/test_uflash.py::test_unhexlify", "tests/test_uflash.py::test_extract", "tests/test_uflash.py::test_extract_not_valid_hex", "tests/test_uflash.py::test_extract_no_python", "tests/test_uflash.py::test_extract_command", "tests/test_uflash.py::test_extract_paths", "tests/test_uflash.py::test_extract_command_source_only", "tests/test_uflash.py::test_extract_command_no_source" ]
[]
[ "tests/test_uflash.py::test_get_version", "tests/test_uflash.py::test_hexlify", "tests/test_uflash.py::test_hexlify_empty_script", "tests/test_uflash.py::test_embed_hex", "tests/test_uflash.py::test_embed_no_python", "tests/test_uflash.py::test_embed_no_runtime", "tests/test_uflash.py::test_find_microbit_posix_exists", "tests/test_uflash.py::test_find_microbit_posix_missing", "tests/test_uflash.py::test_find_microbit_nt_exists", "tests/test_uflash.py::test_find_microbit_nt_missing", "tests/test_uflash.py::test_find_microbit_unknown_os", "tests/test_uflash.py::test_save_hex", "tests/test_uflash.py::test_save_hex_no_hex", "tests/test_uflash.py::test_save_hex_path_not_to_hex_file", "tests/test_uflash.py::test_flash_no_args", "tests/test_uflash.py::test_flash_has_python_no_path_to_microbit", "tests/test_uflash.py::test_flash_with_paths", "tests/test_uflash.py::test_flash_cannot_find_microbit", "tests/test_uflash.py::test_flash_wrong_python", "tests/test_uflash.py::test_main_no_args", "tests/test_uflash.py::test_main_first_arg_python", "tests/test_uflash.py::test_main_first_arg_help", "tests/test_uflash.py::test_main_first_arg_not_python", "tests/test_uflash.py::test_main_two_args" ]
[]
MIT License
368
networkx__networkx-1908
e0479d2e090ec301de9612330585e9bc8d1f967c
2016-01-05 18:21:56
e0479d2e090ec301de9612330585e9bc8d1f967c
diff --git a/networkx/algorithms/matching.py b/networkx/algorithms/matching.py index afc505e5f..6bb064dee 100644 --- a/networkx/algorithms/matching.py +++ b/networkx/algorithms/matching.py @@ -46,7 +46,7 @@ def maximal_matching(G): for u,v in G.edges(): # If the edge isn't covered, add it to the matching # then remove neighborhood of u and v from consideration. - if u not in nodes and v not in nodes: + if u not in nodes and v not in nodes and u!=v: matching.add((u,v)) nodes.add(u) nodes.add(v)
maximal_matching and self loops `maximal_matching` does allow self-loops ```py >>> G = nx.Graph([[1,1]]) >>> nx.matching.maximal_matching(G) {(1, 1)} ``` whereas `max_weight_matching` does not ```py >>> nx.matching.max_weight_matching(G) {} ``` Is this expected behaviour? If not, a simple `u != v` check should fix it.
networkx/networkx
diff --git a/networkx/algorithms/tests/test_matching.py b/networkx/algorithms/tests/test_matching.py index 05fa8c1b1..ac86da9bc 100644 --- a/networkx/algorithms/tests/test_matching.py +++ b/networkx/algorithms/tests/test_matching.py @@ -247,6 +247,20 @@ def test_maximal_matching(): vset = set(u for u, v in matching) vset = vset | set(v for u, v in matching) + for edge in graph.edges(): + u, v = edge + ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \ + "not a proper matching!") + graph = nx.Graph() + graph.add_edge(1, 1) + graph.add_edge(1, 2) + graph.add_edge(2, 2) + graph.add_edge(2, 3) + matching = nx.maximal_matching(graph) + assert(len(matching)==1) + vset = set(u for u, v in matching) + vset = vset | set(v for u, v in matching) + for edge in graph.edges(): u, v = edge ok_(len(set([v]) & vset) > 0 or len(set([u]) & vset) > 0, \
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
1.111
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y libgdal-dev graphviz" ], "python": "3.6", "reqs_path": [ "requirements/default.txt", "requirements/test.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 decorator==5.1.1 importlib-metadata==4.8.3 iniconfig==1.1.1 -e git+https://github.com/networkx/networkx.git@e0479d2e090ec301de9612330585e9bc8d1f967c#egg=networkx nose==1.3.7 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: networkx channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - decorator==5.1.1 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/networkx
[ "networkx/algorithms/tests/test_matching.py::test_maximal_matching" ]
[]
[ "networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial1", "networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial2", "networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial3", "networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial4", "networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial5", "networkx/algorithms/tests/test_matching.py::TestMatching::test_trivial6", "networkx/algorithms/tests/test_matching.py::TestMatching::test_floating_point_weights", "networkx/algorithms/tests/test_matching.py::TestMatching::test_negative_weights", "networkx/algorithms/tests/test_matching.py::TestMatching::test_s_blossom", "networkx/algorithms/tests/test_matching.py::TestMatching::test_s_t_blossom", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom_relabel", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom_expand", "networkx/algorithms/tests/test_matching.py::TestMatching::test_s_blossom_relabel_expand", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nested_s_blossom_relabel_expand", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom1", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom2", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom_least_slack", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom_augmenting", "networkx/algorithms/tests/test_matching.py::TestMatching::test_nasty_blossom_expand_recursively", "networkx/algorithms/tests/test_matching.py::test_maximal_matching_ordering" ]
[]
BSD 3-Clause
369
geowurster__tinymr-13
a387cf72cfc2a18978b77058e1e28f532258ae49
2016-01-07 05:12:23
a387cf72cfc2a18978b77058e1e28f532258ae49
diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..9c9f9fb --- /dev/null +++ b/.coveragerc @@ -0,0 +1,2 @@ +[run] +omit: tinymr/_backport_heapq.py diff --git a/README.rst b/README.rst index 853c011..d653294 100644 --- a/README.rst +++ b/README.rst @@ -105,7 +105,7 @@ implementation with parallelized map and reduce phases will be added. yield key, sum(values) - def final_reducer(self, pairs): + def output(self, pairs): """ Normally this phase is where the final dataset is written to disk, @@ -186,52 +186,24 @@ that appear below match the ``word`` only because a ``sort`` key was not given. Words that appear in the input text on multiple lines have multiple ``(word, count)`` pairs. A ``count`` of ``2`` would indicate a word that appeared twice on a single line, but our input data does not have this -condition. +condition. Truncated output below. The dictionary values are lists containing +tuples to allow for a sort key, which is explained elsewhere. .. code-block:: python { - 'use': [('use', 1)], - 'new': [('new', 1)], - 'above': [('above', 1)], - 'redistributions': [('redistributions', 1)], - 'source': [('source', 1), ('source', 1)], - 'without': [('without', 1)], - 'notice': [('notice', 1)], - 'redistribution': [('redistribution', 1)], - 'bsd': [('bsd', 1)], - 'that': [('that', 1)], - 'permitted': [('permitted', 1)], - 'forms': [('forms', 1)], - 'rights': [('rights', 1)], - 'must': [('must', 1)], - 'list': [('list', 1)], - 'are': [('are', 1), ('are', 1)], - 'with': [('with', 1)], - 'd': [('d', 1)], - 'license': [('license', 1)], - 'binary': [('binary', 1)], - 'reserved': [('reserved', 1)], - 'or': [('or', 1)], - 'the': [('the', 1), ('the', 1), ('the', 1)], - 'and': [('and', 1), ('and', 1), ('and', 1)], - 'all': [('all', 1)], - 'met': [('met', 1)], - 'this': [('this', 1)], - 'provided': [('provided', 1)], - 'of': [('of', 1), ('of', 1)], - 'c': [('c', 1)], - 'wurster': [('wurster', 1)], - 'code': [('code', 1)], - 'disclaimer': [('disclaimer', 
1)], - 'modification': [('modification', 1)], - 'copyright': [('copyright', 1), ('copyright', 1)], - 'retain': [('retain', 1)], 'kevin': [('kevin', 1)], - 'conditions': [('conditions', 1), ('conditions', 1)], - 'following': [('following', 1), ('following', 1)], - 'in': [('in', 1)], '2015': [('2015', 1)] + '2015': [(1,)] + 'above': [(1,)] + 'all': [(1,)] + 'and': [(1,), (1,), (1,)] + 'are': [(1,), (1,)] + 'binary': [(1,)] + 'bsd': [(1,)] + 'c': [(1,)] + 'code': [(1,)] } + **Reduce** Sum ``count`` for each ``word``. @@ -241,62 +213,31 @@ Sum ``count`` for each ``word``. # The ``reducer()`` receives a key and an iterator of values key = 'the' values = (1, 1, 1) - yield key, sum(values) + def reducer(key, values): + yield key, sum(values) **Partition** The reducer does not _have_ to produces the same key it was given, so the data is partitioned by key again, which is superfluous for this wordcount example. Again the keys are kept in case the data is sorted and only match ``word`` -because an optional ``sort`` key was not given. +because an optional ``sort`` key was not given. Truncated output below. .. 
code-block:: python { - 'following': [('following', 2)], - '2015': [('2015', 1)], - 'reserved': [('reserved', 1)], - 'permitted': [('permitted', 1)], - 'forms': [('forms', 1)], - 'are': [('are', 2)], - 'license': [('license', 1)], - 'c': [('c', 1)], - 'kevin': [('kevin', 1)], - 'without': [('without', 1)], - 'redistribution': [('redistribution', 1)], - 'copyright': [('copyright', 2)], - 'met': [('met', 1)], - 'use': [('use', 1)], - 'the': [('the', 3)], - 'rights': [('rights', 1)], - 'that': [('that', 1)], - 'or': [('or', 1)], - 'this': [('this', 1)], - 'with': [('with', 1)], - 'source': [('source', 2)], - 'new': [('new', 1)], - 'binary': [('binary', 1)], - 'wurster': [('wurster', 1)], - 'list': [('list', 1)], - 'must': [('must', 1)], - 'of': [('of', 2)], - 'retain': [('retain', 1)], - 'modification': [('modification', 1)], - 'and': [('and', 3)], - 'above': [('above', 1)], - 'all': [('all', 1)], - 'redistributions': [('redistributions', 1)], - 'bsd': [('bsd', 1)], - 'in': [('in', 1)], - 'conditions': [('conditions', 2)], - 'disclaimer': [('disclaimer', 1)], - 'd': [('d', 1)], - 'code': [('code', 1)], - 'provided': [('provided', 1)], - 'notice': [('notice', 1)] + '2015': [(1,)] + 'above': [(1,)] + 'all': [(1,)] + 'and': [(3,)] + 'are': [(2,)] + 'binary': [(1,)] + 'bsd': [(1,)] + 'c': [(1,)] + 'code': [(1,)] } -**Final Reduce** +**Output** The default implementation is to return ``(key, iter(values))`` pairs from the ``final_reducer()``, which would look something like: diff --git a/tinymr/__init__.py b/tinymr/__init__.py index 5ccf28f..f283135 100644 --- a/tinymr/__init__.py +++ b/tinymr/__init__.py @@ -5,6 +5,10 @@ Heavily inspired by Spotify's Luigi framework - github.com/Spotify/Luigi """ +import logging +logging.basicConfig() + + __version__ = '0.1' __author__ = 'Kevin Wurster' __email__ = '[email protected]' diff --git a/tinymr/_backport_heapq.py b/tinymr/_backport_heapq.py new file mode 100644 index 0000000..a574351 --- /dev/null +++ b/tinymr/_backport_heapq.py 
@@ -0,0 +1,620 @@ +# encoding: utf-8 + + +# This module was copied from the cpython source code and maintains its +# original license. Modifications were limited to the changes required to +# support Python 2 + 3 and are provided without any guarantee. For example, +# `yield from generator` calls were replaced with: +# +# for value in generator: +# yield value +# +# and `iterator.__next__` were replaced with: +# +# getattr(iterator, '__next__', getattr(iterator, 'next')) +# +# Python 3's `heapq.merge()` accepts a keyfunc for determining how to sort a +# given object. Code was copied from: +# https://github.com/python/cpython/tree/15572204799fb8506645dc1c448135f4e4ffde00 + + +"""Heap queue algorithm (a.k.a. priority queue). +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. +Usage: +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged +Our API differs from textbook heap algorithms as follows: +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. +- Our heappop() method returns the smallest item, not the largest. +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! 
+""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues +[explanation by François Pinard] +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + 0 + 1 2 + 3 4 5 6 + 7 8 9 10 11 12 13 14 + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +an usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. 
This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! +In a word, heaps are useful memory structures to know. 
I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + + +import functools + + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + + +builtin_next = next + + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + return returnitem + return lastelt + + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! 
That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + + +def heapify(x): + """Transform list into a heap, in-place, in O(len(x)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(range(n//2)): + _siftup(x, i) + + +def _heappop_max(heap): + """Maxheap version of a heappop.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup_max(heap, 0) + return returnitem + return lastelt + + +def _heapreplace_max(heap, item): + """Maxheap version of a heappop followed by a heappush.""" + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup_max(heap, 0) + return returnitem + + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. 
+ while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom comparison methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. 
+# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if parent < newitem: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[rightpos] < heap[childpos]: + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). 
+ heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + + +def merge(*iterables, **kwargs): + '''Merge multiple sorted inputs into a single sorted output. + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + If *key* is not None, applies a key function to each element to determine + its sort order. + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + ''' + + key = kwargs.pop('key', None) + reverse = kwargs.pop('reverse', False) + + assert not kwargs, "Unrecognized kwargs: {}".format(kwargs) + + h = [] + h_append = h.append + + if reverse: + _heapify = _heapify_max + _heappop = _heappop_max + _heapreplace = _heapreplace_max + direction = -1 + else: + _heapify = heapify + _heappop = heappop + _heapreplace = heapreplace + direction = 1 + + if key is None: + for order, it in enumerate(map(iter, iterables)): + try: + next = functools.partial(builtin_next, it) + # next = getattr(it, '__next__', getattr(it, 'next')) + h_append([next(), order * direction, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + value, order, next = s = h[0] + yield value + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + value, order, next = h[0] + yield value + while True: + yield next() + return + + for order, it in enumerate(map(iter, iterables)): + try: + next = functools.partial(builtin_next, it) + # next = getattr(it, '__next__', getattr(it, 'next')) + value = next() + h_append([key(value), order * direction, value, next]) + except 
StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + key_value, order, value, next = s = h[0] + yield value + value = next() + s[0] = key(value) + s[2] = value + _heapreplace(h, s) + except StopIteration: + _heappop(h) + if h: + key_value, order, value, next = h[0] + yield value + while True: + yield next() + + +# Algorithm notes for nlargest() and nsmallest() +# ============================================== +# +# Make a single pass over the data while keeping the k most extreme values +# in a heap. Memory consumption is limited to keeping k values in a list. +# +# Measured performance for random inputs: +# +# number of comparisons +# n inputs k-extreme values (average of 5 trials) % more than min() +# ------------- ---------------- --------------------- ----------------- +# 1,000 100 3,317 231.7% +# 10,000 100 14,046 40.5% +# 100,000 100 105,749 5.7% +# 1,000,000 100 1,007,751 0.8% +# 10,000,000 100 10,009,401 0.1% +# +# Theoretical number of comparisons for k smallest of n random inputs: +# +# Step Comparisons Action +# ---- -------------------------- --------------------------- +# 1 1.66 * k heapify the first k-inputs +# 2 n - k compare remaining elements to top of heap +# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap +# 4 k * lg2(k) - (k/2) final sort of the k most extreme values +# +# Combining and simplifying for a rough estimate gives: +# +# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k)) +# +# Computing the number of comparisons for step 3: +# ----------------------------------------------- +# * For the i-th new value from the iterable, the probability of being in the +# k most extreme values is k/i. For example, the probability of the 101st +# value seen being in the 100 most extreme values is 100/101. +# * If the value is a new extreme value, the cost of inserting it into the +# heap is 1 + log(k, 2). 
+# * The probability times the cost gives: +# (k/i) * (1 + log(k, 2)) +# * Summing across the remaining n-k elements gives: +# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1)) +# * This reduces to: +# (H(n) - H(k)) * k * (1 + log(k, 2)) +# * Where H(n) is the n-th harmonic number estimated by: +# gamma = 0.5772156649 +# H(n) = log(n, e) + gamma + 1 / (2 * n) +# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence +# * Substituting the H(n) formula: +# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2) +# +# Worst-case for step 3: +# ---------------------- +# In the worst case, the input data is reversed sorted so that every new element +# must be inserted in the heap: +# +# comparisons = 1.66 * k + log(k, 2) * (n - k) +# +# Alternative Algorithms +# ---------------------- +# Other algorithms were not used because they: +# 1) Took much more auxiliary memory, +# 2) Made multiple passes over the data. +# 3) Made more comparisons in common cases (small k, large n, semi-random input). +# See the more detailed comparison of approach at: +# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest + +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. 
+ Equivalent to: sorted(iterable, key=key)[:n] + """ + + # Short-cut for n==1 is to use min() + if n == 1: + it = iter(iterable) + sentinel = object() + if key is None: + result = min(it, default=sentinel) + else: + result = min(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + # put the range(n) first so that zip() doesn't + # consume one too many elements from the iterator + result = [(elem, i) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + if elem < top: + _heapreplace(result, (elem, order)) + top = result[0][0] + order += 1 + result.sort() + return [r[0] for r in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + k = key(elem) + if k < top: + _heapreplace(result, (k, order, elem)) + top = result[0][0] + order += 1 + result.sort() + return [r[2] for r in result] + + +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. 
+ Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() + if n == 1: + it = iter(iterable) + sentinel = object() + if key is None: + result = max(it, default=sentinel) + else: + result = max(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + if top < elem: + _heapreplace(result, (elem, order)) + top = result[0][0] + order -= 1 + result.sort(reverse=True) + return [r[0] for r in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + k = key(elem) + if top < k: + _heapreplace(result, (k, order, elem)) + top = result[0][0] + order -= 1 + result.sort(reverse=True) + return [r[2] for r in result] + + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass +try: + from _heapq import _heapreplace_max +except ImportError: + pass +try: + from _heapq import _heapify_max +except ImportError: + pass +try: + from _heapq import _heappop_max +except ImportError: + pass diff --git a/tinymr/_mrtools.py b/tinymr/_mrtools.py new file mode 100644 index 0000000..1a200db --- /dev/null +++ b/tinymr/_mrtools.py @@ -0,0 +1,100 @@ +""" +Helpers for MapReduce implementations. 
+""" + + +from collections import namedtuple + +import six + +from tinymr.tools import sorter + + +def strip_sort_key(kv_stream): + + """ + Given a stream of `(key, [(sort, data), (sort, data)])` with sort key + intact, remove the key from the values. + + Example: + + [ + ('key1', [(10, data1), (3, data25)]), + ('key2', [(200, data100), (250, data67)) + ] + + Produces: + + [ + ('key1', [data1, data25]), + ('key2', [data100, data67) + ] + + Parameters + ---------- + kv_stream : dict or iter + Dictionary like `{key: [(sort, data)]}` or a stream of tuples like + `(key, [(sort, data])`. + + Yields + ------ + tuple + `(key, [data, data, ...])` + """ + + kv_stream = six.iteritems(kv_stream) if isinstance(kv_stream, dict) else kv_stream + return ((k, tuple(i[-1] for i in v)) for k, v in kv_stream) + + +def sort_partitioned_values(kv_stream): + + """ + Given a stream of `(key, [(sort, data), (sort, data)])` sort the values + for every key. + + Example: + + [ + ('key1', [(10, data), (3, data)]), + ('key2', [(200, data), (250, data)) + ] + + Produces: + + [ + ('key1', [(3, data), (10, data)]), + ('key2', [(200, data), (250, data)) + ] + + Parameters + ---------- + kv_stream : dict or iter + Dictionary like `{key: [(sort, data]}` or a stream of tuples like + `(key, [(sort, data])`. + + Yields + ------ + tuple + `(key, [(sort, data), (sort, data), ...])` + """ + + kv_stream = six.iteritems(kv_stream) if isinstance(kv_stream, dict) else kv_stream + return ((k, sorter(v, key=lambda x: x[0])) for k, v in kv_stream) + + +class ReduceJob(namedtuple('ReduceJob', ['reducer', 'sort', 'jobs', 'chunksize'])): + + """ + Describes a reduce job. Makes keeping track of multiple reducers easier. + + Parameters + ---------- + reducer : callable + Does the reducing. Has a signature like `reducer(key, iter(values))`. + sort : bool + Determines if the partitioned values should be sorted. + jobs : int + Number of jobs to run in parallel. + chunksize : int + Amount of data to pass to one `job`. 
+ """ diff --git a/tinymr/base.py b/tinymr/base.py index 5c5c07f..eba8905 100644 --- a/tinymr/base.py +++ b/tinymr/base.py @@ -3,30 +3,30 @@ Base classes. Subclass away! """ -from collections import defaultdict -from contextlib import contextmanager from itertools import chain import six +from tinymr import _mrtools +from tinymr import errors +from tinymr import tools -class MRBase(object): + +class BaseMapReduce(object): """ Base class for various MapReduce implementations. Not all methods are used by every implementation. """ - _closed = False - def __enter__(self): """ See `__exit__` for context manager usage. """ - if self.closed: - raise IOError("MapReduce task is closed - cannot reuse.") + # if self.closed: + # raise IOError("MapReduce task is closed - cannot reuse.") return self @@ -44,10 +44,109 @@ class MRBase(object): self.close() @property - def sort_map(self): + def jobs(self): """ - Sort the output from the map phase before the combine phase. + Default number of jobs to run in parallel for each phase. + + Returns + ------- + int + """ + + return 1 + + @property + def map_jobs(self): + + """ + Number of jobs to run in parallel during the map phase. Defaults + to `jobs` property. + + Returns + ------- + int + """ + + return self.jobs + + @property + def sort_jobs(self): + + """ + Number of jobs to run in parallel during the sort phases. Defaults + to `jobs` property. + + Returns + ------- + int + """ + + return self.jobs + + @property + def reduce_jobs(self): + + """ + Number of jobs to run in parallel during the reduce phase. If `None`, + defaults to `jobs` property. + + Returns + ------- + int + """ + + return self.jobs + + @property + def chunksize(self): + + """ + Amount of data to process in each `job`. + + Returns + ------- + int + """ + + return 1 + + @property + def map_chunksize(self): + + """ + Amount of data to process in each `job` during the map phase. + Defaults to `chunksize`. 
+ """ + + return self.chunksize + + @property + def sort_chunksize(self): + + """ + Amount of data to process in each `job` during the sort phase. + Defaults to `chunksize`. + """ + + return self.chunksize + + @property + def reduce_chunksize(self): + + """ + Amount of data to process in each `job` during the reduce phase. + Defaults to `chunksize`. + """ + + return self.chunksize + + @property + def sort(self): + + """ + Disable all sorting phases. Setting individual properties overrides + this setting for individual phases. Returns ------- @@ -57,56 +156,59 @@ class MRBase(object): return True @property - def sort_combine(self): + def sort_map(self): """ - Sort the output from the combine phase before the partition phase. + Sort the output from the map phase before the combine phase. Returns ------- bool """ - return True + return self.sort @property - def sort_final_reduce(self): + def sort_combine(self): """ - Pass data to the `final_reducer()` sorted by key. + Sort the output from the combine phase before the partition phase. Returns ------- bool """ - return True + return self.sort @property - def sort_reduce(self): + def sort_output(self): """ - Sort the output from the `reducer()` phase before the `final_reducer(). + Pass data to `output()` sorted by key. Returns ------- bool """ - return True + return self.sort @property - def closed(self): + def sort_reduce(self): """ - Indicates whether or not the MapReduce task is closed. + Sort the output from each `reducer()` before executing the next or + before being passed to `output()`. + + Define one property per reducer, so `reducer2()` would be `sort_reduce2`. Returns ------- bool """ - return self._closed + return self.sort def close(self): @@ -158,7 +260,10 @@ class MRBase(object): See `reducer()` for more information. 
""" - raise NotImplementedError + # Not required so we raise a special exception that we can catch later + # Raising NotImplementedError also causes linters and code inspectors + # to prompt the user to implement this method when it is not required. + raise errors._CombinerNotImplemented def init_reduce(self): @@ -189,7 +294,7 @@ class MRBase(object): raise NotImplementedError - def final_reducer(self, pairs): + def output(self, pairs): """ Receives `(key, value)` pairs from each reducer. The output of this @@ -212,89 +317,94 @@ class MRBase(object): return ((key, tuple(values)) for key, values in pairs) - @contextmanager - def _partition(self, psd_stream): + @property + def _reduce_jobs(self): + + reducers = tools.sorter(filter( + lambda x: not x.startswith('_') and 'reducer' in x, + dir(self))) + + for r in reducers: + yield _mrtools.ReduceJob( + reducer=getattr(self, r), + sort=getattr(self, 'sort_{}'.format(r.replace('reducer', 'reduce'))), + jobs=getattr(self, '{}_jobs'.format(r.replace('reducer', 'reduce'))), + chunksize=getattr(self, '{}_jobs'.format(r.replace('reducer', 'reduce')))) + + def _map_combine_partition(self, stream): """ - Context manager to partition data and destroy it when finished. + Run `mapper()`, partition, `combiner()` (if implemented) and partition + on a chunk of input data. Data may be sorted between each phase + according to the control properties. + + Produces a dictionary of partitioned data with sort keys intact. Parameters ---------- - psd_stream : iter - Produces `(partition, sort, data)` tuples. - sort_key : bool, optional - Some MapReduce implementations don't benefit from sorting, and - therefore do not pass around a sort key. Set to `False` in this - case. + stream : iter + Input data passed to the MapReduce task. Returns ------- - defaultdict - Keys are partition keys and values are lists of `(sort, data)` tuples. 
+ dict + {key: [(sort, data), (sort, data), ...]} """ - partitioned = defaultdict(list) - - try: - - for kv_data in psd_stream: - partitioned[kv_data[0]].append(kv_data) - - yield partitioned - - finally: - partitioned = None - - def _sorter(self, key_values, fake=False): - - """ - Produces sorted data without any keys. + # Map, partition, and convert back to a `(key, [v, a, l, u, e, s])` stream + mapped = chain(*(self.mapper(item) for item in stream)) + map_partitioned = tools.partition(mapped) + map_partitioned = six.iteritems(map_partitioned) - Parameters - ---------- - data : iter - Produces tuples from the map phase. - fake : bool, optional - Don't do the sort - just strip off the data key. + if self.sort_map: + map_partitioned = _mrtools.sort_partitioned_values(map_partitioned) - Yields - ------ - iter - Sorted data without keys. - """ + try: - for key, values in key_values: - values = iter(values) if fake else sorted(values, key=lambda x: x[-2]) - yield key, (v[-1] for v in values) + # The generators within this method get weird and don't break + # properly when wrapped in this try/except + # Instead we just kinda probe the `combiner()` to see if it exists + # and hope it doesn't do any setup. + self.combiner(None, None) + has_combiner = True - def _map(self, stream): + except errors.CombinerNotImplemented: + has_combiner = False - """ - Apply `mapper()` across the input stream. 
- """ + if has_combiner: + map_partitioned = _mrtools.strip_sort_key(map_partitioned) + combined = chain(*(self.combiner(k, v) for k, v in map_partitioned)) + combine_partitioned = tools.partition(combined) + combine_partitioned = six.iteritems(combine_partitioned) - return chain(*(self.mapper(item) for item in stream)) + # If we don't have a combiner then we don't need to partition either + # because we're already dealing with partitioned output from the + # map phase + else: + combine_partitioned = map_partitioned - def _reduce(self, kv_stream): + # If we don't have a combiner or if we're not sorting, then whatever + # we got from the mapper is good enough + if has_combiner and self.sort_combine: + combine_partitioned = _mrtools.sort_partitioned_values(combine_partitioned) - """ - Apply the `reducer()` across a stream of `(key, values)` pairs. - """ + return dict(combine_partitioned) - return chain(*(self.reducer(key, values) for key, values in kv_stream)) + def _reduce_partition(self, stream, reducer, sort): - def _combine(self, kv_stream): + reduced = chain(*(reducer(k, v) for k, v in stream)) + partitioned = tools.partition(reduced) + partitioned = six.iteritems(partitioned) - """ - Apply the `combiner()` across a stream of `(key, values)` pairs. - """ + if sort: + partitioned = _mrtools.sort_partitioned_values(partitioned) - return chain(*(self.combiner(key, values) for key, values in kv_stream)) + return tuple(partitioned) - def _final_reducer_sorter(self, kv_stream): + def _output_sorter(self, kv_stream): """ - Sort data by key before it enters the `final_reducer()`. + Sort data by key before it enters `output()`. 
Parameters ---------- @@ -306,4 +416,4 @@ class MRBase(object): tuple """ - return ((k, v) for k, v in sorted(kv_stream, key=lambda x: x[0])) + return ((k, v) for k, v in tools.sorter(kv_stream, key=lambda x: x[0])) diff --git a/tinymr/errors.py b/tinymr/errors.py new file mode 100644 index 0000000..ee546cc --- /dev/null +++ b/tinymr/errors.py @@ -0,0 +1,38 @@ +""" +tinymr specific exceptions. +""" + + +class MRException(Exception): + + """ + Base exception for tinymr. + """ + + +class UnorderableKeys(MRException): + + """ + Encountered keys during a sort operation that could not be ordered. This + could mean that some keys are `str`, some are `int`, some are `None`, etc. + """ + + +class CombinerNotImplemented(MRException): + + """ + MapReduce task does not implement a `combiner()`. + """ + + +class ClosedTask(MRException): + + """ + Cannot re-use closed MapReduce tasks. + """ + + +# Instantiated exceptions to make sure we get a clear message +_UnorderableKeys = UnorderableKeys(UnorderableKeys.__doc__.strip()) +_CombinerNotImplemented = CombinerNotImplemented(CombinerNotImplemented.__doc__.strip()) +_ClosedTask = ClosedTask(ClosedTask.__doc__.strip()) diff --git a/tinymr/memory.py b/tinymr/memory.py index a136a8e..74245f2 100644 --- a/tinymr/memory.py +++ b/tinymr/memory.py @@ -3,81 +3,47 @@ In-memory MapReduce - for those weird use cases ... """ -from itertools import chain -import multiprocessing as mp - -import six - -import tinymr as mr -import tinymr.base -import tinymr.tools - - -# class MRParallelNoSort(mr.base.MRBase): -# -# @property -# def jobs(self): -# -# """ -# Number of tasks to execute in parallel. -# """ -# -# return 1 -# -# @property -# def map_size(self): -# -# """ -# Number of items from the input data stream to hand to each mapper. 
-# """ -# -# return 1 -# -# def __call__(self, stream): -# -# stream = mr.tools.slicer(stream, self.map_size) -# -# combined = chain( -# *mp.Pool(self.jobs).imap_unordered(self._map_partition_combine, stream)) -# -# with self._partition_no_sort(combined) as partitioned: -# partitioned = tuple(six.iteritems(partitioned)) -# -# reduced = tuple(mp.Pool(self.jobs).imap_unordered(self._imap_reducer, partitioned)) -# -# with self._partition_no_sort(reduced) as partitioned: -# return self.final_reducer(six.iteritems(partitioned)) -# -# def _imap_reducer(self, pair): -# -# """ -# Adapter to integrate `reducer()` into the `imap_unordered()` API. -# """ -# -# return tuple(self.reducer(*pair)) - - -class MRSerial(mr.base.MRBase): - - """ - For MapReduce operations that don't benefit from sorting or parallelism. - - The `mapper()` and `reducer()` must yield 2 element tuples. The first - element is used for partitioning and the second is data. - """ +import functools +import logging + +from tinymr import _mrtools +from tinymr import base +from tinymr import tools +from tinymr.tools import runner + + +logger = logging.getLogger('tinymr') +logger.setLevel(logging.DEBUG) + + +class MapReduce(base.BaseMapReduce): def __call__(self, stream): - with self._partition(self._map(stream)) as partitioned: + sliced = tools.slicer(stream, self.map_chunksize) + + # Map, partition, combine, partition + with runner(self._map_combine_partition, sliced, self.map_jobs) as mcp: + partitioned = tools.merge_partitions(*mcp, sort=self.sort_combine) + + self.init_reduce() + + # Run all partition jobs + reducer_input = partitioned + for rj in self._reduce_jobs: + + func = functools.partial( + self._reduce_partition, reducer=rj.reducer, sort=rj.sort) - sorted_data = self._sorter(six.iteritems(partitioned), fake=self.sort_map) + reducer_input = _mrtools.strip_sort_key(reducer_input) + sliced = tools.slicer(reducer_input, rj.chunksize) - with self._partition(self._reduce(sorted_data)) as partitioned: + 
with runner(func, sliced, rj.jobs) as reduced: + partitioned = tools.merge_partitions(*reduced, sort=rj.sort) - sorted_data = self._sorter(six.iteritems(partitioned), fake=self.sort_reduce) + partitioned = _mrtools.strip_sort_key(partitioned) - if self.sort_final_reduce: - sorted_data = self._final_reducer_sorter(sorted_data) + if self.sort_output: + partitioned = self._output_sorter(partitioned) - self.init_reduce() - return self.final_reducer(sorted_data) + return self.output(partitioned) diff --git a/tinymr/tools.py b/tinymr/tools.py index 422faa3..eddc9e5 100644 --- a/tinymr/tools.py +++ b/tinymr/tools.py @@ -3,12 +3,37 @@ Tools for building MapReduce implementations. """ -from collections import OrderedDict +from collections import defaultdict import itertools as it import multiprocessing as mp +import six from six.moves import zip +from tinymr._backport_heapq import merge as heapq_merge +from tinymr import errors + + +# Make instance methods pickle-able in Python 2 +# Instance methods are not available as a type, so we have to create a tiny +# class so we can grab an instance method +# We then register our improved _reduce_method() with copy_reg so pickle knows +# what to do. +if six.PY2: # pragma: no cover + import copy_reg + + class _I: + def m(self): + pass + + def _reduce_method(m): + if m.im_self is None: + return getattr, (m.im_class, m.im_func.func_name) + else: + return getattr, (m.im_self, m.im_func.func_name) + + copy_reg.pickle(type(_I().m), _reduce_method) + def slicer(iterable, chunksize): @@ -46,53 +71,73 @@ def slicer(iterable, chunksize): raise StopIteration -def runner(func, iterable, jobs): +class runner(object): """ The `multiprocessing` module can be difficult to debug and introduces some overhead that isn't needed when only running one job. Use a generator in this case instead. - Parameters - ---------- - func : callable - Callable object to map across `iterable`. - iterable : iter - Data to process. 
- jobs : int + Wrapped in a class to make the context syntax optional. """ - if jobs < 1: - raise ValueError("jobs must be >= 1, not: {}".format(jobs)) - elif jobs == 1: - return (func(i) for i in iterable) - else: - return mp.Pool(jobs).imap_unordered(func, iterable) + def __init__(self, func, iterable, jobs): + + """ + Parameters + ---------- + func : callable + Callable object to map across `iterable`. + iterable : iter + Data to process. + jobs : int + Number of `multiprocessing` jobs. + """ + + self._func = func + self._iterable = iterable + self._jobs = jobs + self._closed = False + + if jobs < 1: + raise ValueError("jobs must be >= 1, not: {}".format(jobs)) + elif jobs == 1: + self._pool = None + self._proc = (func(i) for i in iterable) + else: + self._pool = mp.Pool(jobs) + self._proc = self._pool.imap_unordered(func, iterable) + + def __enter__(self): + return self + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() -class DefaultOrderedDict(OrderedDict): + def __repr__(self): + return "{cname}(func={func}, iterable={iterable}, jobs={jobs})".format( + cname=self.__class__.__name__, + func=repr(self._func), + iterable=repr(self._iterable), + jobs=self._jobs) - def __init__(self, default_factory, *args, **kwargs): + def __iter__(self): + return self._proc - if not callable(default_factory): - raise TypeError("default_factory must be callable") + def __next__(self): + return next(self._proc) - super(DefaultOrderedDict, self).__init__(*args, **kwargs) - self.default_factory = default_factory + next = __next__ - def __missing__(self, key): - v = self.default_factory() - super(DefaultOrderedDict, self).__setitem__(key, v) - return v + def close(self): - def __repr__(self): - return "{cname}({df}, {dr})".format( - cname=self.__class__.__name__, - df=self.default_factory, - dr=super(DefaultOrderedDict, self).__repr__()) + """ + Close the `multiprocessing` pool if we're using it. 
+ """ - def copy(self): - return self.__class__(self.default_factory, self) + if self._pool is not None: + self._pool.close() + self._closed = True def mapkey(key, values): @@ -121,4 +166,205 @@ def mapkey(key, values): iter """ - return zip(it.cycle([key]), values) + return zip(it.repeat(key), values) + + +def sorter(*args, **kwargs): + + """ + Wrapper for the builtin `sorted()` that produces a better error when + unorderable types are encountered. + + Instead of: + + >>> sorted(['1', 1]) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + TypeError: unorderable types: int() < str() + + we get a `tinymr.errors.UnorderableKeys` exception. + + Python 2 is much more forgiving of unorderable types so the example above + does not raise an exception. + + Parameters + ---------- + *args : *args + Positional arguments for `sorted()`. + **kwargs : **kwargs + Keyword arguments for `sorted()`. + + Raises + ------ + tinymr.errors.UnorderableKeys + + Returns + ------- + list + Output from `sorted()`. + """ + + try: + return sorted(*args, **kwargs) + except TypeError as e: + if 'unorderable' in str(e): + raise errors._UnorderableKeys + else: + raise e + + +def partition(key_values): + + """ + Given a stream of `(key, value)` tuples, group them by key into a dict. + Equivalent to the code below, but faster: + + >>> from itertools import groupby + >>> {k: list(v) for k, v in groupby(key_values, key=lambda x: x[0])} + + Example: + + >>> data = [('key1', 1), ('key1', 2), ('key2', None)] + >>> partition(data) + { + 'key1': [('key1', 1), ('key1', 2)], + 'key2': [('key2', None)] + } + + Parameters + ---------- + key_values : iter + Tuples - typically `(key, value)`, although only the first key is + + Returns + ------- + dict + """ + + out = defaultdict(list) + for data in key_values: + out[data[0]].append(data[1:]) + + return dict(out) + + +class Orderable(object): + + """ + Make any object orderable. 
+ """ + + __slots__ = ['_obj', '_lt', '_le', '_gt', '_ge', '_eq'] + + def __init__(self, obj, lt=True, le=True, gt=False, ge=False, eq=False): + + """ + Default parameters make the object sort as less than or equal to. + + Parameters + ---------- + obj : object + The object being made orderable. + lt : bool, optional + Set `__lt__()` evaluation. + le : bool, optional + Set `__le__()` evaluation. + gt : bool, optional + Set `__gt__()` evaluation. + ge : bool, optional + Set `__ge__()` evaluation. + eq : bool or None, optional + Set `__eq__()` evaluation. Set to `None` to enable a real + equality check. + """ + + self._obj = obj + self._lt = lt + self._le = le + self._gt = gt + self._ge = ge + self._eq = eq + + @property + def obj(self): + + """ + Handle to the object being made orderable. + """ + + return self._obj + + def __lt__(self, other): + return self._lt + + def __le__(self, other): + return self._le + + def __gt__(self, other): + return self._gt + + def __ge__(self, other): + return self._ge + + def __eq__(self, other): + if self._eq is None: + return isinstance(other, self.__class__) and other.obj == self.obj + else: + return self._eq + + +class _OrderableNone(Orderable): + + """ + Like `None` but orderable. + """ + + def __init__(self): + + """ + Use the instantiated `OrderableNone` variable. + """ + + super(_OrderableNone, self).__init__(None, eq=None) + + +# Instantiate so we can make it more None-like +OrderableNone = _OrderableNone() + + +def merge_partitions(*partitions, **kwargs): + + """ + Merge data from multiple `partition()` operations into one dictionary. + + Parameters + ---------- + partitions : *args + Dictionaries from `partition()`. + sort : bool, optional + Sort partitioned data as it is merged. Uses `heapq.merge()` so within + each partition's key, all values must be sorted smallest to largest. 
+ + Returns + ------- + dict + {key: [values]} + """ + + sort = kwargs.pop('sort', False) + assert not kwargs, "Unrecognized kwargs: {}".format(kwargs) + + partitions = (six.iteritems(ptn) if isinstance(ptn, dict) else ptn for ptn in partitions) + + out = defaultdict(list) + + if not sort: + for ptn in partitions: + for key, values in ptn: + out[key].extend(values) + else: + for ptn in partitions: + for key, values in ptn: + out[key] = tuple(heapq_merge(out[key], values, key=lambda x: x[0])) + + return dict(out)
Parallel mapping with a chunksize The inputer iterable can be read in chunks and shoved off into `mapper()` subprocesses would be extremely helpful.
geowurster/tinymr
diff --git a/tests/conftest.py b/tests/conftest.py index c9ec77e..569196d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -39,7 +39,7 @@ def tiny_text_wc_output(): @pytest.fixture(scope='function') def mr_wordcount_memory_no_sort(): - class WordCount(mr.memory.MRSerial): + class WordCount(mr.memory.MapReduce): def mapper(self, item): for word in item.split(): @@ -48,7 +48,7 @@ def mr_wordcount_memory_no_sort(): def reducer(self, key, values): yield key, sum(values) - def final_reducer(self, pairs): + def output(self, pairs): return {k: tuple(v)[0] for k, v in pairs} return WordCount diff --git a/tests/test_base.py b/tests/test_base.py index 0ce44ee..7468888 100644 --- a/tests/test_base.py +++ b/tests/test_base.py @@ -6,14 +6,15 @@ Unittests for tinymr.base import pytest from tinymr import base +from tinymr import errors def test_not_implemented_methods(): - mr = base.MRBase() + mr = base.BaseMapReduce() with pytest.raises(NotImplementedError): mr.mapper(None) - with pytest.raises(NotImplementedError): + with pytest.raises(errors.CombinerNotImplemented): mr.combiner(None, None) with pytest.raises(NotImplementedError): mr.reducer(None, None) @@ -21,59 +22,68 @@ def test_not_implemented_methods(): def test_default_settings(): - mr = base.MRBase() + mr = base.BaseMapReduce() assert mr.sort_map assert mr.sort_combine assert mr.sort_reduce - assert mr.sort_final_reduce + assert mr.sort_output def test_default_methods(): - mr = base.MRBase() + mr = base.BaseMapReduce() expected = [(i, tuple(range(i))) for i in range(1, 10)] - assert list(mr.final_reducer(expected)) == expected - - -def test_context_manager(): - - class MR(base.MRBase): - - def __init__(self): - self._closed = False - - def close(self): - self._closed = True - - with MR() as mr: - assert not mr.closed - assert mr.closed - - -def test_no_context_manager(): - - class MR(base.MRBase): - - def close(self): - self._closed = True - - mr = MR() - assert not mr.closed - mr.close() - assert mr.closed - 
assert not MR._closed - assert not MR().closed - - -def test_cant_reuse_tasks(): - - class MR(base.MRBase): - pass - - with MR() as mr: - pass - - assert mr.closed - with pytest.raises(IOError): - with mr as c: - pass + assert list(mr.output(expected)) == expected + + +# def test_context_manager(): +# +# class MapReduce(base.BaseMapReduce): +# +# def __init__(self): +# self._closed = False +# +# def close(self): +# self._closed = True +# +# with MapReduce() as mr: +# assert not mr.closed +# assert mr.closed + + +# def test_no_context_manager(): +# +# class MapReduce(base.BaseMapReduce): +# +# def close(self): +# self._closed = True +# +# mr = MapReduce() +# assert not mr.closed +# mr.close() +# assert mr.closed +# assert not MapReduce._closed +# assert not MapReduce().closed + + +# def test_cannot_reuse_tasks(): +# +# class MapReduce(base.BaseMapReduce): +# pass +# +# with MapReduce() as mr: +# pass +# +# # assert mr.closed +# with pytest.raises(IOError): +# with mr as c: +# pass + + +# def test_runtime_validate(): +# +# class MapReduce(base.BaseMapReduce): +# closed = True +# +# with pytest.raises(errors.ClosedTask): +# MapReduce()._runtime_validate() diff --git a/tests/test_memory.py b/tests/test_memory.py index 39314a0..59078cd 100644 --- a/tests/test_memory.py +++ b/tests/test_memory.py @@ -7,14 +7,14 @@ import tinymr as mr import tinymr.memory -def test_MRSerial_no_sort(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort): - - with mr_wordcount_memory_no_sort() as wc: - actual = wc(tiny_text.splitlines()) - assert actual == tiny_text_wc_output +# def test_MapReduce_no_sort(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort): +# +# with mr_wordcount_memory_no_sort() as wc: +# actual = wc(tiny_text.splitlines()) +# assert actual == tiny_text_wc_output -def test_MRSerial_init_reduce(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort): +def test_MapReduce_init_reduce(tiny_text, tiny_text_wc_output, mr_wordcount_memory_no_sort): class 
WCInitReduce(mr_wordcount_memory_no_sort): @@ -23,3 +23,175 @@ def test_MRSerial_init_reduce(tiny_text, tiny_text_wc_output, mr_wordcount_memor with WCInitReduce() as wc: actual = wc(tiny_text.splitlines()) + + +def test_MapReduce_sort(): + + """ + Make sure enabling sorting actually sorts. + """ + + text = [ + 'key2 sort2 data2', + 'key2 sort1 data1', + 'key3 sort2 data2', + 'key3 sort1 data1', + 'key1 sort2 data2', + 'key1 sort1 data1' + ] + + class WordCount(mr.memory.MapReduce): + + # Make sure everything gets sent to a single map + combine + chunksize = 10 + + def mapper(self, item): + yield item.split() + + def combiner(self, key, values): + + d1, d2 = list(values) + assert [d1, d2] == ['data1', 'data2'] + + yield key, 'sort2', d2 + yield key, 'sort1', d1 + + def reducer(self, key, values): + + d1, d2 = list(values) + assert [d1, d2] == ['data1', 'data2'] + + yield key, 'sort2', d2 + yield key, 'sort1', d1 + + wc = WordCount() + + for attr in ('jobs', 'map_jobs', 'sort_jobs', 'reduce_jobs'): + assert getattr(wc, attr) == 1 + for attr in ('sort', 'sort_map', 'sort_combine', 'sort_reduce', 'sort_output'): + assert getattr(wc, attr) + for attr in ('chunksize', 'map_chunksize', 'reduce_chunksize', 'sort_chunksize'): + assert getattr(wc, attr) == 10 + + assert tuple(wc(text)) == ( + ('key1', ('data1', 'data2')), + ('key2', ('data1', 'data2')), + ('key3', ('data1', 'data2'))) + + +def test_MapReduce_no_sort(): + + """ + Make sure that disabling sorting actually disables sorting. 
+ """ + + text = [ + 'key2 sort2 data2', + 'key2 sort1 data1', + 'key3 sort2 data2', + 'key3 sort1 data1', + 'key1 sort2 data2', + 'key1 sort1 data1' + ] + + class WordCount(mr.memory.MapReduce): + + # Make sure everything gets sent to a single map + combine + chunksize = 10 + sort = False + + def mapper(self, item): + yield item.split() + + def combiner(self, key, values): + + d2, d1 = list(values) + assert [d2, d1] == ['data2', 'data1'] + + yield key, 'sort2', d2 + yield key, 'sort1', d1 + + def _final_reducer_sorter(self, kv_stream): + raise Exception("Shouldn't hit this.") + + def reducer(self, key, values): + + d2, d1 = list(values) + assert [d2, d1] == ['data2', 'data1'] + + yield key, 'sort2', d2 + yield key, 'sort1', d1 + + wc = WordCount() + for attr in ('jobs', 'map_jobs', 'sort_jobs', 'reduce_jobs'): + assert getattr(wc, attr) == 1 + for attr in ('sort', 'sort_map', 'sort_combine', 'sort_reduce', 'sort_output'): + assert getattr(wc, attr) is False + for attr in ('chunksize', 'map_chunksize', 'reduce_chunksize', 'sort_chunksize'): + assert getattr(wc, attr) == 10 + + # Can't really check key order here, so we're just going to + assert dict(wc(text)) == { + 'key2': ('data2', 'data1'), + 'key3': ('data2', 'data1'), + 'key1': ('data2', 'data1')} + + +class _WCParallelSort(mr.memory.MapReduce): + + """ + Define out here so we can pickle it in multiprocessing + """ + + # Make sure everything gets sent to a single map + combine + chunksize = 10 + jobs = 4 + + def mapper(self, item): + yield item.split() + + def combiner(self, key, values): + + d1, d2 = list(values) + assert [d1, d2] == ['data1', 'data2'] + + yield key, 'sort2', d2 + yield key, 'sort1', d1 + + def reducer(self, key, values): + + d1, d2 = list(values) + assert [d1, d2] == ['data1', 'data2'] + + yield key, 'sort2', d2 + yield key, 'sort1', d1 + + +def test_MapReduce_parallel_sort(): + + """ + Process in parallel with sorting. 
+ """ + + text = [ + 'key2 sort2 data2', + 'key2 sort1 data1', + 'key3 sort2 data2', + 'key3 sort1 data1', + 'key1 sort2 data2', + 'key1 sort1 data1' + ] + + wc = _WCParallelSort() + + for attr in ('jobs', 'map_jobs', 'sort_jobs', 'reduce_jobs'): + assert getattr(wc, attr) == 4 + for attr in ('sort', 'sort_map', 'sort_combine', 'sort_reduce', 'sort_output'): + assert getattr(wc, attr) + for attr in ('chunksize', 'map_chunksize', 'reduce_chunksize', 'sort_chunksize'): + assert getattr(wc, attr) == 10 + + assert tuple(wc(text)) == ( + ('key1', ('data1', 'data2')), + ('key2', ('data1', 'data2')), + ('key3', ('data1', 'data2'))) diff --git a/tests/test_tools.py b/tests/test_tools.py index 7d17b63..4b66402 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -3,13 +3,14 @@ Unittests for tinymr.tools """ -import pickle +from collections import defaultdict from multiprocessing.pool import IMapUnorderedIterator from types import GeneratorType import pytest import six +from tinymr import errors from tinymr import tools @@ -45,104 +46,179 @@ def _func(v): return v + 1 -def test_runner(): +def test_runner_1job(): input = list(range(10)) expected = tuple(i + 1 for i in input) j1 = tools.runner(_func, input, 1) - assert isinstance(j1, GeneratorType) + assert isinstance(j1, tools.runner) + assert isinstance(iter(j1), GeneratorType) assert tuple(j1) == expected - j2 = tools.runner(_func, input, 2) - assert isinstance(j2, IMapUnorderedIterator) - assert tuple(sorted(j2)) == expected +def test_runner_2job(): + + input = list(range(10)) + expected = tuple(i + 1 for i in input) + + # Also tests context manager + with tools.runner(_func, input, 2) as j2: + assert not j2._closed + assert isinstance(j2, tools.runner) + assert isinstance(iter(j2), IMapUnorderedIterator) + assert tuple(sorted(j2)) == expected + assert j2._closed + + +def test_runner_next(): + + input = list(range(10)) + expected = list(i + 1 for i in input) + + r = tools.runner(_func, input, 1) + assert next(r) 
== _func(input[0]) + + # Multiple jobs - have to pretty much run the whole thing and sort to compare + results = [] + with tools.runner(_func, input, 2) as proc: + for i in input: + results.append(next(proc)) + + assert sorted(results) == expected + + +def test_runner_attrs_and_exceptions(): + + # repr + r = tools.runner(_func, range(10), 2) + assert repr(r).startswith(r.__class__.__name__) + assert 'jobs=2' in repr(r) + assert 'iterable={}'.format(repr(range(10))) in repr(r) + + # Bad values with pytest.raises(ValueError): tools.runner(None, None, -1) -class TestDefaultUnorderedDict: +def test_mapkey(): + + actual = tools.mapkey('key', range(5)) + expected = [('key', 0), ('key', 1), ('key', 2), ('key', 3), ('key', 4)] + + assert not isinstance(actual, (list, tuple)) # Make sure we get an iterator + assert list(actual) == expected - def setup_method(self, method): - self.d = tools.DefaultOrderedDict(list) - def test_repr(self): - assert self.d.__class__.__name__ in repr(self.d) +def test_sorter(): - def test_present_key(self): - self.d['key'] = 'value' - assert self.d['key'] == 'value' + items = [1, 6, 3, 5, 9, 10] + assert sorted(items) == tools.sorter(items) - def test_missing_key(self): - assert self.d['missing'] == [] - def test_bool_true(self): - self.d[None] = 'word' - assert self.d +# Python 2 isn't very forgiving when it comes to sorting. 
+# Make sure a useful error is raised for unorderable types +if six.PY3: + def test_sorter_unorderable(): + # Unorderable types + with pytest.raises(errors.UnorderableKeys): + tools.sorter(['2', 1]) - def test_bool_false(self): - assert not self.d - def test_not_present(self): - d = tools +def test_sorter_exceptions(): - def test_exceptions(self): - with pytest.raises(TypeError): - tools.DefaultOrderedDict(None) + if not six.PY2: + with pytest.raises(errors.UnorderableKeys): + tools.sorter(['1', 1]) - def test_copy(self): - self.d['key1'] = 'v1' - self.d['key2'] = 'v2' - c = self.d.copy() - assert isinstance(c, tools.DefaultOrderedDict) - assert self.d['key1'] == 'v1' - assert self.d['key2'] == 'v2' - self.d['key1'] = None - assert c['key1'] == 'v1' - assert len(c) == 2 - assert list(c.keys()) == ['key1', 'key2'] - assert list(c.values()) == ['v1', 'v2'] - assert c.default_factory is list + def _k(v): + raise TypeError('bad') - def test_sorted_keys(self): + with pytest.raises(TypeError): + tools.sorter([2, 1], key=_k) - """ - Verify that keys maintain their insert position. - """ - # Set values - it = list(range(10)) - for i in it: - self.d[i] = i + 1 +def test_Orderable(): - # Check values - for k, v in self.d.items(): - assert k + 1 == v + on = tools.Orderable(None) + for v in (-1, 0, 1): + assert on < v + assert on <= v + assert not on > v + assert not on >= v + assert on != v + assert on.obj is None - # Check sorting - assert list(self.d.keys()) == it - assert sorted(self.d.keys()) == it + on = tools.Orderable(None, lt=False, le=False, gt=True, ge=True) + for v in (-1, 0, 1): + assert on > v + assert on >= v + assert not on < v + assert not on <= v + assert on != v + assert on.obj is None - def test_unsorted_keys(self): + # Actually perform equality test + on = tools.Orderable(None, eq=None) + assert on == on + assert not on is False + assert not on == 67 - """ - Verify that unsorted keys remain the the same unsorted order. 
- """ + # Never equal to a type + on = tools.Orderable(None, eq=False) + assert not on == on + assert not on == on + assert not on == 'True' + assert not on == 21 - for i in range(5): - self.d[i] = i + 1 - for i in reversed(range(30, 35)): - self.d[i] = i + 1 + # Always equal to any type + on = tools.Orderable(None, eq=True) + assert on == on + assert on == 'False' + assert on == 10 - assert list(self.d.keys()) == [0, 1, 2, 3, 4, 34, 33, 32, 31, 30] - assert len(self.d.keys()) == 10 +def test_OrderableNone(): -def test_mapkey(): + assert isinstance(tools.OrderableNone, tools._OrderableNone) + assert tools.OrderableNone.obj is None - actual = tools.mapkey('key', range(5)) - expected = [('key', 0), ('key', 1), ('key', 2), ('key', 3), ('key', 4)] - assert not isinstance(actual, (list, tuple)) # Make sure we get an iterator - assert list(actual) == expected +def test_partition(): + + data = [ + (1, 2), + (1, 1), + (2, 1), + (3, 1), + ('ptn', 'sort', 'data')] + + expected = { + 1: [(2,), (1,)], + 2: [(1,)], + 3: [(1,)], + 'ptn': [('sort', 'data')]} + + ptn = tools.partition(data) + + assert isinstance(ptn, dict) + assert not isinstance(ptn, defaultdict) + assert ptn == expected + + +def test_merge_partitions(): + + dptn = { + 1: [(1, 2), (1, 1)], + 2: [(2, 1)], + 3: [(3, 1)], + 'ptn': [('ptn', 'sort', 'data')]} + + expected = { + 1: [(1, 2), (1, 1), (1, 2), (1, 1)], + 2: [(2, 1), (2, 1)], + 3: [(3, 1), (3, 1)], + 'ptn': [('ptn', 'sort', 'data'), ('ptn', 'sort', 'data')]} + + actual = tools.merge_partitions(dptn, dptn) + assert expected == actual
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 5 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 charset-normalizer==2.0.12 coverage==6.2 coveralls==3.3.1 docopt==0.6.2 idna==3.10 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 pytest-cov==4.0.0 requests==2.27.1 six==1.17.0 -e git+https://github.com/geowurster/tinymr.git@a387cf72cfc2a18978b77058e1e28f532258ae49#egg=tinymr toml @ file:///tmp/build/80754af9/toml_1616166611790/work tomli==1.2.3 typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work urllib3==1.26.20 zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: tinymr channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - charset-normalizer==2.0.12 - coverage==6.2 - coveralls==3.3.1 - docopt==0.6.2 - idna==3.10 - pytest-cov==4.0.0 - requests==2.27.1 - six==1.17.0 - tomli==1.2.3 - urllib3==1.26.20 prefix: /opt/conda/envs/tinymr
[ "tests/test_base.py::test_not_implemented_methods", "tests/test_base.py::test_default_settings", "tests/test_base.py::test_default_methods", "tests/test_memory.py::test_MapReduce_init_reduce", "tests/test_memory.py::test_MapReduce_sort", "tests/test_memory.py::test_MapReduce_no_sort", "tests/test_memory.py::test_MapReduce_parallel_sort", "tests/test_tools.py::test_slicer_even", "tests/test_tools.py::test_slicer_odd", "tests/test_tools.py::test_runner_1job", "tests/test_tools.py::test_runner_2job", "tests/test_tools.py::test_runner_next", "tests/test_tools.py::test_runner_attrs_and_exceptions", "tests/test_tools.py::test_mapkey", "tests/test_tools.py::test_sorter", "tests/test_tools.py::test_Orderable", "tests/test_tools.py::test_OrderableNone", "tests/test_tools.py::test_partition", "tests/test_tools.py::test_merge_partitions" ]
[ "tests/test_tools.py::test_sorter_unorderable", "tests/test_tools.py::test_sorter_exceptions" ]
[]
[]
New BSD License
370
sympy__sympy-10377
0e0aec4a3a03649085d2aea8ec17ae94691f0770
2016-01-09 21:16:05
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
smichr: > Why does one have to import AZ here no longer needed. In latter commits I clean up arg handling. I see that I have some more cleanup of doctests to do. Also, the `rst` file needs updating to clear the testing error. smichr: Laguna has to be indented, apparently.
diff --git a/doc/src/modules/crypto.rst b/doc/src/modules/crypto.rst index 808928d034..54a6fdcb7b 100644 --- a/doc/src/modules/crypto.rst +++ b/doc/src/modules/crypto.rst @@ -1,29 +1,52 @@ Basic Cryptography Module ========================= -Included in this module are both block ciphers and stream ciphers. +Encryption is the process of hiding a message and a cipher is a +means of doing so. Included in this module are both block and stream +ciphers: * Shift cipher * Affine cipher - * Bifid ciphers - * Vigenere's cipher * substitution ciphers + * Vigenere's cipher * Hill's cipher + * Bifid ciphers * RSA * Kid RSA - * linear feedback shift registers (a stream cipher) + * linear-feedback shift registers (for stream ciphers) * ElGamal encryption +In a *substitution cipher* "units" (not necessarily single characters) +of plaintext are replaced with ciphertext according to a regular system. + +A *transposition cipher* is a method of encryption by which +the positions held by "units" of plaintext are replaced by a +permutation of the plaintext. That is, the order of the units is +changed using a bijective function on the position of the characters +to perform the encryption. + +A *monoalphabetic cipher* uses fixed substitution over the entire +message, whereas a *polyalphabetic cipher* uses a number of +substitutions at different times in the message. + .. module:: sympy.crypto.crypto -.. autofunction:: alphabet_of_cipher +.. autofunction:: AZ + +.. autofunction:: padded_key + +.. autofunction:: check_and_join .. autofunction:: cycle_list .. autofunction:: encipher_shift +.. autofunction:: decipher_shift + .. autofunction:: encipher_affine +.. autofunction:: decipher_affine + .. autofunction:: encipher_substitution .. autofunction:: encipher_vigenere @@ -34,6 +57,12 @@ Included in this module are both block ciphers and stream ciphers. .. autofunction:: decipher_hill +.. autofunction:: encipher_bifid + +.. autofunction:: decipher_bifid + +.. 
autofunction:: bifid5_square + .. autofunction:: encipher_bifid5 .. autofunction:: decipher_bifid5 @@ -46,10 +75,6 @@ Included in this module are both block ciphers and stream ciphers. .. autofunction:: bifid6_square -.. autofunction:: encipher_bifid7 - -.. autofunction:: bifid7_square - .. autofunction:: rsa_public_key .. autofunction:: rsa_private_key @@ -83,3 +108,13 @@ Included in this module are both block ciphers and stream ciphers. .. autofunction:: encipher_elgamal .. autofunction:: decipher_elgamal + +.. autofunction:: dh_public_key + +.. autofunction:: dh_private_key + +.. autofunction:: dh_shared_key + +.. autofunction:: encipher_elgamal + +.. autofunction:: decipher_elgamal diff --git a/sympy/core/compatibility.py b/sympy/core/compatibility.py index 251a7db5a6..702a02cfc6 100644 --- a/sympy/core/compatibility.py +++ b/sympy/core/compatibility.py @@ -327,6 +327,13 @@ def __ne__(self, other): except ImportError: # <= Python 2.7 from itertools import izip_longest as zip_longest + +try: + from string import maketrans +except ImportError: + maketrans = str.maketrans + + try: from itertools import combinations_with_replacement except ImportError: # <= Python 2.6 diff --git a/sympy/crypto/__init__.py b/sympy/crypto/__init__.py index 5b8bb665ad..4b19d5fb7e 100644 --- a/sympy/crypto/__init__.py +++ b/sympy/crypto/__init__.py @@ -1,12 +1,13 @@ -from sympy.crypto.crypto import (alphabet_of_cipher, cycle_list, - encipher_shift, encipher_affine, encipher_substitution, - encipher_vigenere, decipher_vigenere, - bifid5_square, bifid6_square, bifid7_square, - encipher_hill, decipher_hill, encipher_bifid5, encipher_bifid6, - encipher_bifid7, decipher_bifid5, decipher_bifid6, encipher_kid_rsa, - decipher_kid_rsa, kid_rsa_private_key, kid_rsa_public_key, - decipher_rsa, rsa_private_key, rsa_public_key, encipher_rsa, - lfsr_connection_polynomial, lfsr_autocorrelation, lfsr_sequence, - encode_morse, decode_morse, elgamal_private_key, elgamal_public_key, - decipher_elgamal, 
encipher_elgamal, dh_private_key, dh_public_key, - dh_shared_key) +from sympy.crypto.crypto import (cycle_list, + encipher_shift, encipher_affine, encipher_substitution, + check_and_join, encipher_vigenere, decipher_vigenere, bifid5_square, + bifid6_square, encipher_hill, decipher_hill, + encipher_bifid5, encipher_bifid6, decipher_bifid5, + decipher_bifid6, encipher_kid_rsa, decipher_kid_rsa, + kid_rsa_private_key, kid_rsa_public_key, decipher_rsa, rsa_private_key, + rsa_public_key, encipher_rsa, lfsr_connection_polynomial, + lfsr_autocorrelation, lfsr_sequence, encode_morse, decode_morse, + elgamal_private_key, elgamal_public_key, decipher_elgamal, + encipher_elgamal, dh_private_key, dh_public_key, dh_shared_key, + padded_key, encipher_bifid, decipher_bifid, bifid_square, bifid5, + bifid6, bifid10) diff --git a/sympy/crypto/crypto.py b/sympy/crypto/crypto.py index ee55da732e..2b2968e4f3 100644 --- a/sympy/crypto/crypto.py +++ b/sympy/crypto/crypto.py @@ -1,102 +1,172 @@ # -*- coding: utf-8 -*- """ -Classical ciphers and LFSRs +This file contains some classical ciphers and routines +implementing a linear-feedback shift register (LFSR) +and the Diffie-Hellman key exchange. """ from __future__ import print_function -from random import randrange +from string import whitespace, ascii_uppercase as uppercase, printable from sympy import nextprime from sympy.core import Rational, S, Symbol -from sympy.core.numbers import igcdex +from sympy.core.numbers import igcdex, mod_inverse from sympy.core.compatibility import range from sympy.matrices import Matrix from sympy.ntheory import isprime, totient, primitive_root from sympy.polys.domains import FF -from sympy.polys.polytools import gcd, Poly, invert +from sympy.polys.polytools import gcd, Poly +from sympy.utilities.misc import filldedent, translate from sympy.utilities.iterables import flatten, uniq +from sympy.utilities.randtest import _randrange +def AZ(s=None): + """Return the letters of ``s`` in uppercase. 
In case more than + one string is passed, each of them will be processed and a list + of upper case strings will be returned. -def alphabet_of_cipher(symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): - """ - Returns the list of characters in the string input defining the alphabet. + Examples + ======== - Notes - ===== + >>> from sympy.crypto.crypto import AZ + >>> AZ('Hello, world!') + 'HELLOWORLD' + >>> AZ('Hello, world!'.split()) + ['HELLO', 'WORLD'] - First, some basic definitions. + See Also + ======== + check_and_join + """ + if not s: + return uppercase + t = type(s) is str + if t: + s = [s] + rv = [check_and_join(i.upper().split(), uppercase, filter=True) + for i in s] + if t: + return rv[0] + return rv + +bifid5 = AZ().replace('J', '') +bifid6 = AZ() + '0123456789' +bifid10 = printable + + +def padded_key(key, symbols, filter=True): + """Return a string of the distinct characters of ``symbols`` with + those of ``key`` appearing first, omitting characters in ``key`` + that are not in ``symbols``. A ValueError is raised if a) there are + duplicate characters in ``symbols`` or b) there are characters + in ``key`` that are not in ``symbols``. - A *substitution cipher* is a method of encryption by which - "units" (not necessarily characters) of plaintext are replaced with - ciphertext according to a regular system. The "units" may be - characters (ie, words of length `1`), words of length `2`, and so forth. + Examples + ======== - A *transposition cipher* is a method of encryption by which - the positions held by "units" of plaintext are replaced by a - permutation of the plaintext. That is, the order of the units is - changed using a bijective function on the characters' positions - to perform the encryption. + >>> from sympy.crypto.crypto import padded_key + >>> padded_key('PUPPY', 'OPQRSTUVWXY') + 'PUYOQRSTVWX' + >>> padded_key('RSA', 'ARTIST') + Traceback (most recent call last): + ... 
+ ValueError: duplicate characters in symbols: T + """ + syms = list(uniq(symbols)) + if len(syms) != len(symbols): + extra = ''.join(sorted(set( + [i for i in symbols if symbols.count(i) > 1]))) + raise ValueError('duplicate characters in symbols: %s' % extra) + extra = set(key) - set(syms) + if extra: + raise ValueError( + 'characters in key but not symbols: %s' % ''.join( + sorted(extra))) + key0 = ''.join(list(uniq(key))) + return key0 + ''.join([i for i in syms if i not in key0]) + + +def check_and_join(phrase, symbols=None, filter=None): + """ + Joins characters of `phrase` and if ``symbols`` is given, raises + an error if any character in ``phrase`` is not in ``symbols``. - A *monoalphabetic cipher* uses fixed substitution over the entire - message, whereas a *polyalphabetic cipher* uses a number of substitutions - at different times in the message. + Parameters + ========== - Each of these ciphers require an alphabet for the messages to be - constructed from. + phrase: string or list of strings to be returned as a string + symbols: iterable of characters allowed in ``phrase``; + if ``symbols`` is None, no checking is performed Examples ======== - >>> from sympy.crypto.crypto import alphabet_of_cipher - >>> alphabet_of_cipher() - ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] - >>> L = [str(i) for i in range(10)] + ['a', 'b', 'c']; L - ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c'] - >>> A = "".join(L); A - '0123456789abc' - >>> alphabet_of_cipher(A) - ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c'] - >>> alphabet_of_cipher() - ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] + >>> from sympy.crypto.crypto import check_and_join + >>> check_and_join('a phrase') + 'a phrase' + >>> check_and_join('a phrase'.upper().split()) + 'APHRASE' + >>> 
check_and_join('a phrase!'.upper().split(), 'ARE', filter=True) + 'ARAE' + >>> check_and_join('a phrase!'.upper().split(), 'ARE') + Traceback (most recent call last): + ... + ValueError: characters in phrase but not symbols: "!HPS" """ - symbols = "".join(symbols) - return list(symbols) - - -######## shift cipher examples ############ + rv = ''.join(''.join(phrase)) + if symbols is not None: + symbols = check_and_join(symbols) + missing = ''.join(list(sorted(set(rv) - set(symbols)))) + if missing: + if not filter: + raise ValueError( + 'characters in phrase but not symbols: "%s"' % missing) + rv = translate(rv, None, missing) + return rv + + +def _prep(msg, key, alp, default=None): + if not alp: + if not default: + alp = AZ() + msg = AZ(msg) + key = AZ(key) + else: + alp = default + else: + alp = ''.join(alp) + key = check_and_join(key, alp, filter=True) + msg = check_and_join(msg, alp, filter=True) + return msg, key, alp def cycle_list(k, n): """ - Returns the cyclic shift of the list range(n) by k. + Returns the elements of the list ``range(n)`` shifted to the + left by ``k`` (so the list starts with ``k`` (mod ``n``)). 
Examples ======== - >>> from sympy.crypto.crypto import cycle_list, alphabet_of_cipher - >>> L = cycle_list(3,26); L - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 1, 2] - >>> A = alphabet_of_cipher(); A - ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', - 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] - >>> [A[i] for i in L] - ['D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', - 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'A', 'B', 'C'] + >>> from sympy.crypto.crypto import cycle_list + >>> cycle_list(3, 10) + [3, 4, 5, 6, 7, 8, 9, 0, 1, 2] """ - L = list(range(n)) - return L[k:] + L[:k] + k = k % n + return list(range(k, n)) + list(range(k)) + + +######## shift cipher examples ############ -def encipher_shift(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): +def encipher_shift(msg, key, symbols=None): """ - Performs shift cipher encryption on plaintext pt, and returns the ciphertext. + Performs shift cipher encryption on plaintext msg, and returns the + ciphertext. Notes ===== @@ -104,7 +174,7 @@ def encipher_shift(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): The shift cipher is also called the Caesar cipher, after Julius Caesar, who, according to Suetonius, used it with a shift of three to protect messages of military significance. - Caesar's nephew Augustus reportedtly used a similar cipher, but + Caesar's nephew Augustus reportedly used a similar cipher, but with a right shift of 1. @@ -112,154 +182,249 @@ def encipher_shift(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): INPUT: - ``k``: an integer from 0 to 25 (the secret key) + ``key``: an integer (the secret key) - ``m``: string of upper-case letters (the plaintext message) + ``msg``: plaintext of upper-case letters OUTPUT: - ``c``: string of upper-case letters (the ciphertext message) + ``ct``: ciphertext of upper-case letters STEPS: - 0. Identify the alphabet A, ..., Z with the integers 0, ..., 25. - 1. 
Compute from the string ``m`` a list ``L1`` of corresponding - integers. + 0. Number the letters of the alphabet from 0, ..., N + 1. Compute from the string ``msg`` a list ``L1`` of + corresponding integers. 2. Compute from the list ``L1`` a new list ``L2``, given by adding ``(k mod 26)`` to each element in ``L1``. - 3. Compute from the list ``L2`` a string ``c`` of corresponding - letters. + 3. Compute from the list ``L2`` a string ``ct`` of + corresponding letters. Examples ======== - >>> from sympy.crypto.crypto import encipher_shift - >>> pt = "GONAVYBEATARMY" - >>> encipher_shift(pt, 1) + >>> from sympy.crypto.crypto import encipher_shift, decipher_shift + >>> msg = "GONAVYBEATARMY" + >>> ct = encipher_shift(msg, 1); ct 'HPOBWZCFBUBSNZ' - >>> encipher_shift(pt, 0) + + To decipher the shifted text, change the sign of the key: + + >>> encipher_shift(ct, -1) + 'GONAVYBEATARMY' + + There is also a convenience function that does this with the + original key: + + >>> decipher_shift(ct, 1) 'GONAVYBEATARMY' - >>> encipher_shift(pt, -1) - 'FNMZUXADZSZQLX' + """ + msg, _, A = _prep(msg, '', symbols) + shift = len(A) - key % len(A) + key = A[shift:] + A[:shift] + return translate(msg, key, A) + +def decipher_shift(msg, key, symbols=None): """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) - n = len(A) - L = cycle_list(key, n) - C = [A[(A.index(pt[i]) + key) % n] for i in range(len(pt))] - return "".join(C) + Return the text by shifting the characters of ``msg`` to the + left by the amount given by ``key``. 
+ + Examples + ======== + + >>> from sympy.crypto.crypto import encipher_shift, decipher_shift + >>> msg = "GONAVYBEATARMY" + >>> ct = encipher_shift(msg, 1); ct + 'HPOBWZCFBUBSNZ' + + To decipher the shifted text, change the sign of the key: + + >>> encipher_shift(ct, -1) + 'GONAVYBEATARMY' + + Or use this function with the original key: + + >>> decipher_shift(ct, 1) + 'GONAVYBEATARMY' + """ + return encipher_shift(msg, -key, symbols) ######## affine cipher examples ############ -def encipher_affine(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): +def encipher_affine(msg, key, symbols=None, _inverse=False): r""" - Performs the affine cipher encryption on plaintext ``pt``, and returns the ciphertext. + Performs the affine cipher encryption on plaintext ``msg``, and + returns the ciphertext. - Encryption is based on the map `x \rightarrow ax+b` (mod `26`). Decryption is based on - the map `x \rightarrow cx+d` (mod `26`), where `c = a^{-1}` (mod `26`) and - `d = -a^{-1}c` (mod `26`). (In particular, for the map to be invertible, - we need `\mathrm{gcd}(a, 26) = 1.`) + Encryption is based on the map `x \rightarrow ax+b` (mod `N`) + where ``N`` is the number of characters in the alphabet. + Decryption is based on the map `x \rightarrow cx+d` (mod `N`), + where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`). + In particular, for the map to be invertible, we need + `\mathrm{gcd}(a, N) = 1` and an error will be raised if this is + not true. Notes ===== - This is a straightforward generalization of the shift cipher. + This is a straightforward generalization of the shift cipher with + the added complexity of requiring 2 characters to be deciphered in + order to recover the key. 
ALGORITHM: INPUT: - ``a, b``: a pair integers, where ``gcd(a, 26) = 1`` (the secret key) + ``msg``: string of characters that appear in ``symbols`` - ``m``: string of upper-case letters (the plaintext message) + ``a, b``: a pair integers, with ``gcd(a, N) = 1`` + (the secret key) + + ``symbols``: string of characters (default = uppercase + letters). When no symbols are given, ``msg`` is converted + to upper case letters and all other charactes are ignored. OUTPUT: - ``c``: string of upper-case letters (the ciphertext message) + ``ct``: string of characters (the ciphertext message) STEPS: - 0. Identify the alphabet "A", ..., "Z" with the integers 0, ..., 25. - 1. Compute from the string ``m`` a list ``L1`` of corresponding - integers. - 2. Compute from the list ``L1`` a new list ``L2``, given by replacing - ``x`` by ``a*x + b (mod 26)``, for each element ``x`` in ``L1``. - 3. Compute from the list ``L2`` a string ``c`` of corresponding - letters. + 0. Number the letters of the alphabet from 0, ..., N + 1. Compute from the string ``msg`` a list ``L1`` of + corresponding integers. + 2. Compute from the list ``L1`` a new list ``L2``, given by + replacing ``x`` by ``a*x + b (mod N)``, for each element + ``x`` in ``L1``. + 3. Compute from the list ``L2`` a string ``ct`` of + corresponding letters. + + See Also + ======== + decipher_affine + + """ + msg, _, A = _prep(msg, '', symbols) + N = len(A) + a, b = key + assert gcd(a, N) == 1 + if _inverse: + c = mod_inverse(a, N) + d = -b*c + a, b = c, d + B = ''.join([A[(a*i + b) % N] for i in range(N)]) + return translate(msg, A, B) + + +def decipher_affine(msg, key, symbols=None): + r""" + Return the deciphered text that was made from the mapping, + `x \rightarrow ax+b` (mod `N`), where ``N`` is the + number of characters in the alphabet. Deciphering is done by + reciphering with a new key: `x \rightarrow cx+d` (mod `N`), + where `c = a^{-1}` (mod `N`) and `d = -a^{-1}b` (mod `N`). 
Examples ======== - >>> from sympy.crypto.crypto import encipher_affine - >>> pt = "GONAVYBEATARMY" - >>> encipher_affine(pt, (1, 1)) - 'HPOBWZCFBUBSNZ' - >>> encipher_affine(pt, (1, 0)) - 'GONAVYBEATARMY' - >>> pt = "GONAVYBEATARMY" - >>> encipher_affine(pt, (3, 1)) + >>> from sympy.crypto.crypto import encipher_affine, decipher_affine + >>> msg = "GO NAVY BEAT ARMY" + >>> key = (3, 1) + >>> encipher_affine(msg, key) 'TROBMVENBGBALV' - >>> ct = "TROBMVENBGBALV" - >>> encipher_affine(ct, (9, 17)) + >>> decipher_affine(_, key) 'GONAVYBEATARMY' """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) - n = len(A) - k1 = key[0] # multiplicative coeff "a" - k2 = key[1] # additive coeff "b" - L = cycle_list(k2, n) - C = [A[(k1*A.index(pt[i]) + k2) % n] for i in range(len(pt))] - return "".join(C) + return encipher_affine(msg, key, symbols, _inverse=True) #################### substitution cipher ########################### -def encipher_substitution(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): +def encipher_substitution(msg, old, new=None): """ - Performs the substitution cipher encryption on plaintext ``pt``, and returns the ciphertext. + Returns the ciphertext obtained by replacing each character that + appears in ``old`` with the corresponding character in ``new``. + If ``old`` is a mapping, then new is ignored and the replacements + defined by ``old`` are used. + + Notes + ===== - Assumes the ``pt`` has only letters taken from ``symbols``. - Assumes ``key`` is a permutation of the symbols. This function permutes the - letters of the plaintext using the permutation given in ``key``. - The decription uses the inverse permutation. - Note that if the permutation in key is order 2 (eg, a transposition) then - the encryption permutation and the decryption permutation are the same. + This is a more general than the affine cipher in that the key can + only be recovered by determining the mapping for each symbol. 
+ Though in practice, once a few symbols are recognized the mappings + for other characters can be quickly guessed. Examples ======== - >>> from sympy.crypto.crypto import alphabet_of_cipher, encipher_substitution - >>> symbols = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - >>> A = alphabet_of_cipher(symbols) - >>> key = "BACDEFGHIJKLMNOPQRSTUVWXYZ" - >>> pt = "go navy! beat army!" - >>> encipher_substitution(pt, key) - 'GONBVYAEBTBRMY' - >>> ct = 'GONBVYAEBTBRMY' - >>> encipher_substitution(ct, key) + >>> from sympy.crypto.crypto import encipher_substitution, AZ + >>> old = 'OEYAG' + >>> new = '034^6' + >>> msg = AZ("go navy! beat army!") + >>> ct = encipher_substitution(msg, old, new); ct + '60N^V4B3^T^RM4' + + To decrypt a substitution, reverse the last two arguments: + + >>> encipher_substitution(ct, new, old) 'GONAVYBEATARMY' + In the special case where ``old`` and ``new`` are a permuation of + order 2 (representing a transposition of characters) their order + is immaterial: + + >>> old = 'NAVY' + >>> new = 'ANYV' + >>> encipher = lambda x: encipher_substitution(x, old, new) + >>> encipher('NAVY') + 'ANYV' + >>> encipher(_) + 'NAVY' + + The substitution cipher, in general, is a method + whereby "units" (not necessarily single characters) of plaintext + are replaced with ciphertext according to a regular system. 
+ + >>> ords = dict(zip('abc', ['\\%i' % ord(i) for i in 'abc'])) + >>> print(encipher_substitution('abc', ords)) + \97\98\99 """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) - n = len(A) - pt0 = [x.capitalize() for x in pt if x.isalnum()] - ct = [key[A.index(x)] for x in pt0] - return "".join(ct) + return translate(msg, old, new) ###################################################################### #################### Vigenère cipher examples ######################## ###################################################################### - -def encipher_vigenere(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): +def encipher_vigenere(msg, key, symbols=None): """ - Performs the Vigenère cipher encryption on plaintext ``pt``, and returns the ciphertext. + Performs the Vigenère cipher encryption on plaintext ``msg``, and + returns the ciphertext. + + Examples + ======== + + >>> from sympy.crypto.crypto import encipher_vigenere, AZ + >>> key = "encrypt" + >>> msg = "meet me on monday" + >>> encipher_vigenere(msg, key) + 'QRGKKTHRZQEBPR' + + Section 1 of the Kryptos sculpture at the CIA headquarters + uses this cipher and also changes the order of the the + alphabet [2]_. Here is the first line of that section of + the sculpture: + + >>> from sympy.crypto.crypto import decipher_vigenere, padded_key + >>> alp = padded_key('KRYPTOS', AZ()) + >>> key = 'PALIMPSEST' + >>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ' + >>> decipher_vigenere(msg, key, alp) + 'BETWEENSUBTLESHADINGANDTHEABSENC' Notes ===== @@ -270,15 +435,16 @@ def encipher_vigenere(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): The so-called *Vigenère cipher* was actually invented by Giovan Batista Belaso in 1553. - This cipher was used in the 1800's, for example, during the American Civil War. - The Confederacy used a brass cipher disk to implement the Vigenère cipher - (now on display in the NSA Museum in Fort Meade) [1]_. 
+ This cipher was used in the 1800's, for example, during the American + Civil War. The Confederacy used a brass cipher disk to implement the + Vigenère cipher (now on display in the NSA Museum in Fort + Meade) [1]_. The Vigenère cipher is a generalization of the shift cipher. - Whereas the shift cipher shifts each letter by the same amount (that amount - being the key of the shift cipher) the Vigenère cipher shifts - a letter by an amount determined by the key (which is a word or - phrase known only to the sender and receiver). + Whereas the shift cipher shifts each letter by the same amount + (that amount being the key of the shift cipher) the Vigenère + cipher shifts a letter by an amount determined by the key (which is + a word or phrase known only to the sender and receiver). For example, if the key was a single letter, such as "C", then the so-called Vigenere cipher is actually a shift cipher with a @@ -294,32 +460,42 @@ def encipher_vigenere(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): INPUT: - ``key``: a string of upper-case letters (the secret key) + ``msg``: string of characters that appear in ``symbols`` + (the plaintext) + + ``key``: a string of characters that appear in ``symbols`` + (the secret key) + + ``symbols``: a string of letters defining the alphabet - ``m``: string of upper-case letters (the plaintext message) OUTPUT: - ``c``: string of upper-case letters (the ciphertext message) + ``ct``: string of characters (the ciphertext message) STEPS: - 0. Identify the alphabet A, ..., Z with the integers 0, ..., 25. - 1. Compute from the string ``key`` a list ``L1`` of corresponding - integers. Let ``n1 = len(L1)``. - 2. Compute from the string ``m`` a list ``L2`` of corresponding - integers. Let ``n2 = len(L2)``. - 3. Break ``L2`` up sequencially into sublists of size ``n1``, and one sublist - at the end of size smaller or equal to ``n1``. - 4. 
For each of these sublists ``L`` of ``L2``, compute a new list ``C`` given by - ``C[i] = L[i] + L1[i] (mod 26)`` to the ``i``-th element in the sublist, - for each ``i``. - 5. Assemble these lists ``C`` by concatenation into a new list of length ``n2``. - 6. Compute from the new list a string ``c`` of corresponding letters. - - Once it is known that the key is, say, `n` characters long, frequency analysis - can be applied to every `n`-th letter of the ciphertext to determine the plaintext. - This method is called *Kasiski examination* (although it was first discovered - by Babbage). + 0. Number the letters of the alphabet from 0, ..., N + 1. Compute from the string ``key`` a list ``L1`` of + corresponding integers. Let ``n1 = len(L1)``. + 2. Compute from the string ``msg`` a list ``L2`` of + corresponding integers. Let ``n2 = len(L2)``. + 3. Break ``L2`` up sequencially into sublists of size + ``n1``; the last sublist may be smaller than ``n1`` + 4. For each of these sublists ``L`` of ``L2``, compute a + new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)`` + to the ``i``-th element in the sublist, for each ``i``. + 5. Assemble these lists ``C`` by concatenation into a new + list of length ``n2``. + 6. Compute from the new list a string ``ct`` of + corresponding letters. + + Once it is known that the key is, say, `n` characters long, + frequency analysis can be applied to every `n`-th letter of + the ciphertext to determine the plaintext. This method is + called *Kasiski examination* (although it was first discovered + by Babbage). If they key is as long as the message and is + comprised of randomly selected characters -- a one-time pad -- the + message is theoretically unbreakable. The cipher Vigenère actually discovered is an "auto-key" cipher described as follows. 
@@ -328,57 +504,70 @@ def encipher_vigenere(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): INPUT: - ``key``: a string of upper-case letters (the secret key) + ``key``: a string of letters (the secret key) - ``m``: string of upper-case letters (the plaintext message) + ``msg``: string of letters (the plaintext message) OUTPUT: - ``c``: string of upper-case letters (the ciphertext message) + ``ct``: string of upper-case letters (the ciphertext message) STEPS: - 0. Identify the alphabet A, ..., Z with the integers 0, ..., 25. - 1. Compute from the string ``m`` a list ``L2`` of corresponding - integers. Let ``n2 = len(L2)``. - 2. Let ``n1`` be the length of the key. Concatenate the string - ``key`` with the first ``n2 - n1`` characters of the plaintext message. - Compute from this string of length ``n2`` a list ``L1`` of corresponding - integers. Note ``n2 = len(L1)``. - 3. Compute a new list ``C`` given by ``C[i] = L1[i] + L2[i] (mod 26)``, - for each ``i``. Note ``n2 = len(C)``. - 4. Compute from the new list a string ``c`` of corresponding letters. + 0. Number the letters of the alphabet from 0, ..., N + 1. Compute from the string ``msg`` a list ``L2`` of + corresponding integers. Let ``n2 = len(L2)``. + 2. Let ``n1`` be the length of the key. Append to the + string ``key`` the first ``n2 - n1`` characters of + the plaintext message. Compute from this string (also of + length ``n2``) a list ``L1`` of integers corresponding + to the letter numbers in the first step. + 3. Compute a new list ``C`` given by + ``C[i] = L1[i] + L2[i] (mod N)``. + 4. Compute from the new list a string ``ct`` of letters + corresponding to the new integers. + + To decipher the auto-key ciphertext, the key is used to decipher + the first ``n1`` characters and then those characters become the + key to decipher the next ``n1`` characters, etc...: + + >>> m = AZ('go navy, beat army! 
yes you can'); m + 'GONAVYBEATARMYYESYOUCAN' + >>> key = AZ('gold bug'); n1 = len(key); n2 = len(m) + >>> auto_key = key + m[:n2 - n1]; auto_key + 'GOLDBUGGONAVYBEATARMYYE' + >>> ct = encipher_vigenere(m, auto_key); ct + 'MCYDWSHKOGAMKZCELYFGAYR' + >>> n1 = len(key) + >>> pt = [] + >>> while ct: + ... part, ct = ct[:n1], ct[n1:] + ... pt.append(decipher_vigenere(part, key)) + ... key = pt[-1] + ... + >>> ''.join(pt) == m + True References ========== .. [1] http://en.wikipedia.org/wiki/Vigenere_cipher - - Examples - ======== - - >>> from sympy.crypto.crypto import encipher_vigenere - >>> key = "encrypt" - >>> pt = "meet me on monday" - >>> encipher_vigenere(pt, key) - 'QRGKKTHRZQEBPR' + .. [2] http://web.archive.org/web/20071116100808/ + http://filebox.vt.edu/users/batman/kryptos.html """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) - N = len(A) # normally, 26 - key0 = uniq(key) - key0 = [x.capitalize() for x in key0 if x.isalnum()] - K = [A.index(x) for x in key0] - k = len(K) - pt0 = [x.capitalize() for x in pt if x.isalnum()] - P = [A.index(x) for x in pt0] - n = len(P) - #m = n//k - C = [(K[i % k] + P[i]) % N for i in range(n)] - return "".join([str(A[x]) for x in C]) - - -def decipher_vigenere(ct, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): + msg, key, A = _prep(msg, key, symbols) + map = dict([(c, i) for i, c in enumerate(A)]) + key = [map[c] for c in key] + N = len(map) + k = len(key) + rv = [] + for i, m in enumerate(msg): + rv.append(A[(map[m] + key[i % k]) % N]) + rv = ''.join(rv) + return rv + + +def decipher_vigenere(msg, key, symbols=None): """ Decode using the Vigenère cipher. 
@@ -390,157 +579,360 @@ def decipher_vigenere(ct, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): >>> ct = "QRGK kt HRZQE BPR" >>> decipher_vigenere(ct, key) 'MEETMEONMONDAY' - """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) + msg, key, A = _prep(msg, key, symbols) + map = dict([(c, i) for i, c in enumerate(A)]) N = len(A) # normally, 26 - key0 = uniq(key) - key0 = [x.capitalize() for x in key0 if x.isalnum()] - K = [A.index(x) for x in key0] - k = len(K) - ct0 = [x.capitalize() for x in ct if x.isalnum()] - C = [A.index(x) for x in ct0] - n = len(C) - #m = n//k - P = [(-K[i % k] + C[i]) % N for i in range(n)] - return "".join([str(A[x]) for x in P]) + K = [map[c] for c in key] + n = len(K) + C = [map[c] for c in msg] + rv = ''.join([A[(-K[i % n] + c) % N] for i, c in enumerate(C)]) + return rv #################### Hill cipher ######################## -def encipher_hill(pt, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): +def encipher_hill(msg, key, symbols=None, pad="Q"): r""" - Performs the Hill cipher encryption on plaintext ``pt``, and returns the ciphertext. + Return the Hill cipher encryption of ``msg``. Notes ===== The Hill cipher [1]_, invented by Lester S. Hill in the 1920's [2]_, - was the first polygraphic cipher in which it was practical (though barely) - to operate on more than three symbols at once. The following discussion assumes - an elementary knowledge of matrices. - - First, each letter is first encoded as a number. We assume here that - "A" `\leftrightarrow` 0, "B" `\leftrightarrow` 1, ..., "Z" `\leftrightarrow` 25. - We denote the integers `\{0, 1, ..., 25\}` - by `Z_{26}`. Suppose your message m consists of `n` capital letters, with no spaces. - This may be regarded an `n`-tuple M of elements of `Z_{26}`. 
A key in the Hill cipher - is a `k x k` matrix `K`, all of whose entries are in `Z_{26}`, such that the matrix - `K` is invertible (ie, that the linear transformation `K: Z_{26}^k \rightarrow Z_{26}^k` + was the first polygraphic cipher in which it was practical + (though barely) to operate on more than three symbols at once. + The following discussion assumes an elementary knowledge of + matrices. + + First, each letter is first encoded as a number starting with 0. + Suppose your message `msg` consists of `n` capital letters, with no + spaces. This may be regarded an `n`-tuple M of elements of + `Z_{26}` (if the letters are those of the English alphabet). A key + in the Hill cipher is a `k x k` matrix `K`, all of whose entries + are in `Z_{26}`, such that the matrix `K` is invertible (i.e., the + linear transformation `K: Z_{N}^k \rightarrow Z_{N}^k` is one-to-one). ALGORITHM: INPUT: - ``key``: a `k x k` invertible matrix `K`, all of whose entries are in `Z_{26}` + ``msg``: plaintext message of `n` upper-case letters - ``m``: string of `n` upper-case letters (the plaintext message) - (Note: Sage assumes that `n` is a multiple of `k`.) + ``key``: a `k x k` invertible matrix `K`, all of whose + entries are in `Z_{26}` (or whatever number of symbols + are being used). + + ``pad``: character (default "Q") to use to make length + of text be a multiple of ``k`` OUTPUT: - ``c``: string of upper-case letters (the ciphertext message) + ``ct``: ciphertext of upper-case letters STEPS: - 0. Identify the alphabet A, ..., Z with the integers 0, ..., 25. - 1. Compute from the string ``m`` a list ``L`` of corresponding - integers. Let ``n = len(L)``. - 2. Break the list ``L`` up into ``t = ceiling(n/k)`` sublists - ``L_1``, ..., ``L_t`` of size ``k`` (where the last list might be - "padded" by 0's to ensure it is size ``k``). - 3. Compute new list ``C_1``, ..., ``C_t`` given by ``C[i] = K*L_i`` - (arithmetic is done mod 26), for each ``i``. + 0. 
Number the letters of the alphabet from 0, ..., N + 1. Compute from the string ``msg`` a list ``L`` of + corresponding integers. Let ``n = len(L)``. + 2. Break the list ``L`` up into ``t = ceiling(n/k)`` + sublists ``L_1``, ..., ``L_t`` of size ``k`` (with + the last list "padded" to ensure its size is + ``k``). + 3. Compute new list ``C_1``, ..., ``C_t`` given by + ``C[i] = K*L_i`` (arithmetic is done mod N), for each + ``i``. 4. Concatenate these into a list ``C = C_1 + ... + C_t``. - 5. Compute from ``C`` a string ``c`` of corresponding letters. - This has length ``k*t``. + 5. Compute from ``C`` a string ``ct`` of corresponding + letters. This has length ``k*t``. References ========== .. [1] en.wikipedia.org/wiki/Hill_cipher - .. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet, The American - Mathematical Monthly Vol.36, June-July 1929, pp.306-312. + .. [2] Lester S. Hill, Cryptography in an Algebraic Alphabet, + The American Mathematical Monthly Vol.36, June-July 1929, + pp.306-312. 
- Examples + See Also ======== - - >>> from sympy.crypto.crypto import encipher_hill - >>> from sympy import Matrix - >>> pt = "meet me on monday" - >>> key = Matrix([[1, 2], [3, 5]]) - >>> encipher_hill(pt, key) - 'UEQDUEODOCTCWQ' - >>> pt = "meet me on tuesday" - >>> encipher_hill(pt, key) - 'UEQDUEODHBOYDJYU' - >>> pt = "GONAVYBEATARMY" - >>> key = Matrix([[1, 0, 1], [0, 1, 1], [2, 2, 3]]) - >>> encipher_hill(pt, key) - 'TBBYTKBEKKRLMYU' + decipher_hill """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) - N = len(A) # normally, 26 + assert key.is_square + assert len(pad) == 1 + msg, pad, A = _prep(msg, pad, symbols) + map = dict([(c, i) for i, c in enumerate(A)]) + P = [map[c] for c in msg] + N = len(A) k = key.cols - pt0 = [x.capitalize() for x in pt if x.isalnum()] - P = [A.index(x) for x in pt0] n = len(P) - m = n//k - if n > m*k: - P = P + [0]*(n - m*k) - m = m + 1 - C = [list(key*Matrix(k, 1, [P[i] for i in range(k*j, k*(j + 1))])) for j in range(m)] - C = flatten(C) - return "".join([A[i % N] for i in C]) + m, r = divmod(n, k) + if r: + P = P + [map[pad]]*(k - r) + m += 1 + rv = ''.join([A[c % N] for j in range(m) for c in + list(key*Matrix(k, 1, [P[i] + for i in range(k*j, k*(j + 1))]))]) + return rv -def decipher_hill(ct, key, symbols="ABCDEFGHIJKLMNOPQRSTUVWXYZ"): +def decipher_hill(msg, key, symbols=None): """ - Deciphering is the same as enciphering but using the inverse of the key matrix. + Deciphering is the same as enciphering but using the inverse of the + key matrix. 
Examples ======== - >>> from sympy.crypto.crypto import decipher_hill + >>> from sympy.crypto.crypto import encipher_hill, decipher_hill >>> from sympy import Matrix - >>> ct = "UEQDUEODOCTCWQ" + >>> key = Matrix([[1, 2], [3, 5]]) - >>> decipher_hill(ct, key) + >>> encipher_hill("meet me on monday", key) + 'UEQDUEODOCTCWQ' + >>> decipher_hill(_, key) 'MEETMEONMONDAY' - >>> ct = "UEQDUEODHBOYDJYU" - >>> decipher_hill(ct, key) - 'MEETMEONTUESDAYA' + + When the length of the plaintext (stripped of invalid characters) + is not a multiple of the key dimension, extra characters will + appear at the end of the enciphered and deciphered text. In order to + decipher the text, those characters must be included in the text to + be deciphered. In the following, the key has a dimension of 4 but + the text is 2 short of being a multiple of 4 so two characters will + be added. + + >>> key = Matrix([[1, 1, 1, 2], [0, 1, 1, 0], + ... [2, 2, 3, 4], [1, 1, 0, 1]]) + >>> msg = "ST" + >>> encipher_hill(msg, key) + 'HJEB' + >>> decipher_hill(_, key) + 'STQQ' + >>> encipher_hill(msg, key, pad="Z") + 'ISPK' + >>> decipher_hill(_, key) + 'STZZ' + + If the last two characters of the ciphertext were ignored in + either case, the wrong plaintext would be recovered: + + >>> decipher_hill("HD", key) + 'ORMV' + >>> decipher_hill("IS", key) + 'UIKY' """ - symbols = "".join(symbols) - A = alphabet_of_cipher(symbols) - N = len(A) # normally, 26 + assert key.is_square + msg, _, A = _prep(msg, '', symbols) + map = dict([(c, i) for i, c in enumerate(A)]) + C = [map[c] for c in msg] + N = len(A) k = key.cols - ct0 = [x.capitalize() for x in ct if x.isalnum()] - C = [A.index(x) for x in ct0] n = len(C) - m = n//k - if n > m*k: - C = C + [0]*(n - m*k) - m = m + 1 + m, r = divmod(n, k) + if r: + C = C + [0]*(k - r) + m += 1 key_inv = key.inv_mod(N) - P = [list(key_inv*Matrix(k, 1, [C[i] for i in range(k*j, k*(j + 1))])) for j in range(m)] - P = flatten(P) - return "".join([A[i % N] for i in P]) + rv = 
''.join([A[p % N] for j in range(m) for p in + list(key_inv*Matrix( + k, 1, [C[i] for i in range(k*j, k*(j + 1))]))]) + return rv #################### Bifid cipher ######################## -def encipher_bifid5(pt, key): +def encipher_bifid(msg, key, symbols=None): + r""" + Performs the Bifid cipher encryption on plaintext ``msg``, and + returns the ciphertext. + + This is the version of the Bifid cipher that uses an `n \times n` + Polybius square. + + INPUT: + + ``msg``: plaintext string + + ``key``: short string for key; duplicate characters are + ignored and then it is padded with the characters in + ``symbols`` that were not in the short key + + ``symbols``: `n \times n` characters defining the alphabet + (default is string.printable) + + OUTPUT: + + ciphertext (using Bifid5 cipher without spaces) + + See Also + ======== + decipher_bifid, encipher_bifid5, encipher_bifid6 + + """ + msg, key, A = _prep(msg, key, symbols, bifid10) + long_key = ''.join(uniq(key)) or A + + n = len(A)**.5 + if n != int(n): + raise ValueError( + 'Length of alphabet (%s) is not a square number.' % len(A)) + N = int(n) + if len(long_key) < N**2: + long_key = list(long_key) + [x for x in A if x not in long_key] + + # the fractionalization + row_col = dict([(ch, divmod(i, N)) + for i, ch in enumerate(long_key)]) + r, c = zip(*[row_col[x] for x in msg]) + rc = r + c + ch = dict([(i, ch) for ch, i in row_col.items()]) + rv = ''.join((ch[i] for i in zip(rc[::2], rc[1::2]))) + return rv + + +def decipher_bifid(msg, key, symbols=None): r""" - Performs the Bifid cipher encryption on plaintext ``pt``, and returns the ciphertext. + Performs the Bifid cipher decryption on ciphertext ``msg``, and + returns the plaintext. + + This is the version of the Bifid cipher that uses the `n \times n` + Polybius square. 
+ + INPUT: + + ``msg``: ciphertext string + + ``key``: short string for key; duplicate characters are + ignored and then it is padded with the characters in + ``symbols`` that were not in the short key + + ``symbols``: `n \times n` characters defining the alphabet + (default=string.printable, a `10 \times 10` matrix) + + OUTPUT: - This is the version of the Bifid cipher that uses the `5 \times 5` Polybius square. + deciphered text + + Examples + ======== + + >>> from sympy.crypto.crypto import ( + ... encipher_bifid, decipher_bifid, AZ) + + Do an encryption using the bifid5 alphabet: + + >>> alp = AZ().replace('J', '') + >>> ct = AZ("meet me on monday!") + >>> key = AZ("gold bug") + >>> encipher_bifid(ct, key, alp) + 'IEILHHFSTSFQYE' + + When entering the text or ciphertext, spaces are ignored so it + can be formatted as desired. Re-entering the ciphertext from the + preceding, putting 4 characters per line and padding with an extra + J, does not cause problems for the deciphering: + + >>> decipher_bifid(''' + ... IEILH + ... HFSTS + ... FQYEJ''', key, alp) + 'MEETMEONMONDAY' + + When no alphabet is given, all 100 printable characters will be + used: + + >>> key = '' + >>> encipher_bifid('hello world!', key) + 'bmtwmg-bIo*w' + >>> decipher_bifid(_, key) + 'hello world!' + + If the key is changed, a different encryption is obtained: + + >>> key = 'gold bug' + >>> encipher_bifid('hello world!', 'gold_bug') + 'hg2sfuei7t}w' + + And if the key used to decrypt the message is not exact, the + original text will not be perfectly obtained: + + >>> decipher_bifid(_, 'gold pug') + 'heldo~wor6d!' + + """ + msg, _, A = _prep(msg, '', symbols, bifid10) + long_key = ''.join(uniq(key)) or A + + n = len(A)**.5 + if n != int(n): + raise ValueError( + 'Length of alphabet (%s) is not a square number.' 
% len(A)) + N = int(n) + if len(long_key) < N**2: + long_key = list(long_key) + [x for x in A if x not in long_key] + + # the reverse fractionalization + row_col = dict( + [(ch, divmod(i, N)) for i, ch in enumerate(long_key)]) + rc = [i for c in msg for i in row_col[c]] + n = len(msg) + rc = zip(*(rc[:n], rc[n:])) + ch = dict([(i, ch) for ch, i in row_col.items()]) + rv = ''.join((ch[i] for i in rc)) + return rv + + +def bifid_square(key): + """Return characters of ``key`` arranged in a square. + + Examples + ======== + + >>> from sympy.crypto.crypto import ( + ... bifid_square, AZ, padded_key, bifid5) + >>> bifid_square(AZ().replace('J', '')) + Matrix([ + [A, B, C, D, E], + [F, G, H, I, K], + [L, M, N, O, P], + [Q, R, S, T, U], + [V, W, X, Y, Z]]) + + >>> bifid_square(padded_key(AZ('gold bug!'), bifid5)) + Matrix([ + [G, O, L, D, B], + [U, A, C, E, F], + [H, I, K, M, N], + [P, Q, R, S, T], + [V, W, X, Y, Z]]) + + See Also + ======== + padded_key + """ + A = ''.join(uniq(''.join(key))) + n = len(A)**.5 + if n != int(n): + raise ValueError( + 'Length of alphabet (%s) is not a square number.' % len(A)) + n = int(n) + f = lambda i, j: Symbol(A[n*i + j]) + rv = Matrix(n, n, f) + return rv + + +def encipher_bifid5(msg, key): + r""" + Performs the Bifid cipher encryption on plaintext ``msg``, and + returns the ciphertext. + + This is the version of the Bifid cipher that uses the `5 \times 5` + Polybius square. The letter "J" is ignored so it must be replaced + with something else (traditionally an "I") before encryption. Notes ===== @@ -548,10 +940,11 @@ def encipher_bifid5(pt, key): The Bifid cipher was invented around 1901 by Felix Delastelle. It is a *fractional substitution* cipher, where letters are replaced by pairs of symbols from a smaller alphabet. 
The - cipher uses a `5 \times 5` square filled with some ordering of the alphabet, - except that "i"s and "j"s are identified (this is a so-called - Polybius square; there is a `6 \times 6` analog if you add back in "j" and also - append onto the usual 26 letter alphabet, the digits 0, 1, ..., 9). + cipher uses a `5 \times 5` square filled with some ordering of the + alphabet, except that "J" is replaced with "I" (this is a so-called + Polybius square; there is a `6 \times 6` analog if you add back in + "J" and also append onto the usual 26 letter alphabet, the digits + 0, 1, ..., 9). According to Helen Gaines' book *Cryptanalysis*, this type of cipher was used in the field by the German Army during World War I. @@ -559,110 +952,114 @@ def encipher_bifid5(pt, key): INPUT: - ``pt``: plaintext string (no "j"s) + ``msg``: plaintext string; converted to upper case and + filtered of anything but all letters except J. - ``key``: short string for key (no repetitions, no "j"s) + ``key``: short string for key; non-alphabetic letters, J + and duplicated characters are ignored and then, if the + length is less than 25 characters, it is padded with other + letters of the alphabet (in alphabetical order). OUTPUT: - ciphertext (using Bifid5 cipher in all caps, no spaces, no "J"s) + ciphertext (all caps, no spaces) STEPS: - 1. Create the `5 \times 5` Polybius square ``S`` associated to the ``k`` as - follows: - - a) starting top left, moving left-to-right, top-to-bottom, - place the letters of the key into a 5x5 matrix, - b) when finished, add the letters of the alphabet - not in the key until the 5x5 square is filled - - 2. Create a list ``P`` of pairs of numbers which are the coordinates - in the Polybius square of the letters in ``pt``. - 3. Let ``L1`` be the list of all first coordinates of ``P`` (length - of ``L1 = n``), let ``L2`` be the list of all second coordinates - of ``P`` (so the length of ``L2`` is also ``n``). - 4. 
Let ``L`` be the concatenation of ``L1`` and ``L2`` (length ``L = 2*n``), - except that consecutive numbers are paired ``(L[2*i], L[2*i + 1])``. - You can regard ``L`` as a list of pairs of length ``n``. - 5. Let ``C`` be the list of all letters which are of the form - ``S[i, j]``, for all ``(i, j)`` in ``L``. As a string, this - is the ciphertext ``ct``. + 0. Create the `5 \times 5` Polybius square ``S`` associated + to ``key`` as follows: + + a) moving from left-to-right, top-to-bottom, + place the letters of the key into a `5 \times 5` + matrix, + b) if the key has less than 25 letters, add the + letters of the alphabet not in the key until the + `5 \times 5` square is filled. + + 1. Create a list ``P`` of pairs of numbers which are the + coordinates in the Polybius square of the letters in + ``msg``. + 2. Let ``L1`` be the list of all first coordinates of ``P`` + (length of ``L1 = n``), let ``L2`` be the list of all + second coordinates of ``P`` (so the length of ``L2`` + is also ``n``). + 3. Let ``L`` be the concatenation of ``L1`` and ``L2`` + (length ``L = 2*n``), except that consecutive numbers + are paired ``(L[2*i], L[2*i + 1])``. You can regard + ``L`` as a list of pairs of length ``n``. + 4. Let ``C`` be the list of all letters which are of the + form ``S[i, j]``, for all ``(i, j)`` in ``L``. As a + string, this is the ciphertext of ``msg``. 
Examples ======== - >>> from sympy.crypto.crypto import encipher_bifid5 - >>> pt = "meet me on monday" - >>> key = "encrypt" - >>> encipher_bifid5(pt, key) - 'LNLLQNPPNPGADK' - >>> pt = "meet me on friday" - >>> encipher_bifid5(pt, key) - 'LNLLFGPPNPGRSK' - - """ - A = alphabet_of_cipher() - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - pt0 = [x.capitalize() for x in pt if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if (not(x in key0) and x != "J")] - n = len(pt0) - # the fractionalization - pairs = [[long_key.index(x)//5, long_key.index(x) % 5] for x in pt0] - tmp_cipher = flatten([x[0] for x in pairs] + [x[1] for x in pairs]) - ct = "".join([long_key[5*tmp_cipher[2*i] + tmp_cipher[2*i + 1]] for i in range(n)]) - return ct + >>> from sympy.crypto.crypto import ( + ... encipher_bifid5, decipher_bifid5) + + "J" will be omitted unless it is replaced with something else: + + >>> round_trip = lambda m, k: \ + ... decipher_bifid5(encipher_bifid5(m, k), k) + >>> key = 'a' + >>> msg = "JOSIE" + >>> round_trip(msg, key) + 'OSIE' + >>> round_trip(msg.replace("J", "I"), key) + 'IOSIE' + >>> j = "QIQ" + >>> round_trip(msg.replace("J", j), key).replace(j, "J") + 'JOSIE' + + See Also + ======== + decipher_bifid5, encipher_bifid + + """ + msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5) + key = padded_key(key, bifid5) + return encipher_bifid(msg, '', key) -def decipher_bifid5(ct, key): +def decipher_bifid5(msg, key): r""" - Performs the Bifid cipher decryption on ciphertext ``ct``, and returns the plaintext. + Return the Bifid cipher decryption of ``msg``. - This is the version of the Bifid cipher that uses the `5 \times 5` Polybius square. + This is the version of the Bifid cipher that uses the `5 \times 5` + Polybius square; the letter "J" is ignored unless a ``key`` of + length 25 is used. 
INPUT: - ``ct``: ciphertext string (digits okay) + ``msg``: ciphertext string - ``key``: short string for key (no repetitions, digits okay) + ``key``: short string for key; duplicated characters are + ignored and if the length is less than 25 characters, it + will be padded with other letters from the alphabet omitting + "J". Non-alphabetic characters are ignored. OUTPUT: - plaintext from Bifid5 cipher (all caps, no spaces, no "J"s) + plaintext from Bifid5 cipher (all caps, no spaces) Examples ======== >>> from sympy.crypto.crypto import encipher_bifid5, decipher_bifid5 - >>> key = "encrypt" - >>> pt = "meet me on monday" - >>> encipher_bifid5(pt, key) - 'LNLLQNPPNPGADK' - >>> ct = 'LNLLQNPPNPGADK' - >>> decipher_bifid5(ct, key) + >>> key = "gold bug" + >>> encipher_bifid5('meet me on friday', key) + 'IEILEHFSTSFXEE' + >>> encipher_bifid5('meet me on monday', key) + 'IEILHHFSTSFQYE' + >>> decipher_bifid5(_, key) 'MEETMEONMONDAY' """ - A = alphabet_of_cipher() - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - ct0 = [x.capitalize() for x in ct if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if (not(x in key0) and x != "J")] - n = len(ct0) - # the fractionalization - pairs = flatten([[long_key.index(x)//5, long_key.index(x) % 5] for x in ct0 if x != "J"]) - tmp_plain = flatten([[pairs[i], pairs[n + i]] for i in range(n)]) - pt = "".join([long_key[5*tmp_plain[2*i] + tmp_plain[2*i + 1]] for i in range(n)]) - return pt + msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid5) + key = padded_key(key, bifid5) + return decipher_bifid(msg, '', key) -def bifid5_square(key): +def bifid5_square(key=None): r""" 5x5 Polybius square. 
@@ -681,75 +1078,60 @@ def bifid5_square(key): [V, W, X, Y, Z]]) """ - A = alphabet_of_cipher() - # first make sure the letters are capitalized - # and key has no spaces or duplicates - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if (not(x in key0) and x != "J")] - f = lambda i, j: Symbol(long_key[5*i + j]) - M = Matrix(5, 5, f) - return M + if not key: + key = bifid5 + else: + _, key, _ = _prep('', key.upper(), None, bifid5) + key = padded_key(key, bifid5) + return bifid_square(key) -def encipher_bifid6(pt, key): +def encipher_bifid6(msg, key): r""" - Performs the Bifid cipher encryption on plaintext ``pt``, and returns the ciphertext. + Performs the Bifid cipher encryption on plaintext ``msg``, and + returns the ciphertext. - This is the version of the Bifid cipher that uses the `6 \times 6` Polybius square. - Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9". + This is the version of the Bifid cipher that uses the `6 \times 6` + Polybius square. INPUT: - ``pt``: plaintext string (digits okay) + ``msg``: plaintext string (digits okay) - ``key``: short string for key (no repetitions, digits okay) + ``key``: short string for key (digits okay). If ``key`` is + less than 36 characters long, the square will be filled with + letters A through Z and digits 0 through 9. 
OUTPUT: ciphertext from Bifid cipher (all caps, no spaces) - Examples + See Also ======== + decipher_bifid6, encipher_bifid - >>> from sympy.crypto.crypto import encipher_bifid6 - >>> key = "encrypt" - >>> pt = "meet me on monday at 8am" - >>> encipher_bifid6(pt, key) - 'HNHOKNTA5MEPEGNQZYG' - >>> encipher_bifid6(pt, key) - 'HNHOKNTA5MEPEGNQZYG' - - """ - A = alphabet_of_cipher() + [str(a) for a in range(10)] - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - pt0 = [x.capitalize() for x in pt if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if not(x in key0)] - n = len(pt0) - # the fractionalization - pairs = [[long_key.index(x)//6, long_key.index(x) % 6] for x in pt0] - tmp_cipher = flatten([x[0] for x in pairs] + [x[1] for x in pairs]) - ct = "".join([long_key[6*tmp_cipher[2*i] + tmp_cipher[2*i + 1]] for i in range(n)]) - return ct + """ + msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6) + key = padded_key(key, bifid6) + return encipher_bifid(msg, '', key) -def decipher_bifid6(ct, key): +def decipher_bifid6(msg, key): r""" - Performs the Bifid cipher decryption on ciphertext ``ct``, and returns the plaintext. + Performs the Bifid cipher decryption on ciphertext ``msg``, and + returns the plaintext. - This is the version of the Bifid cipher that uses the `6 \times 6` Polybius square. - Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "9". + This is the version of the Bifid cipher that uses the `6 \times 6` + Polybius square. INPUT: - ``ct``: ciphertext string (digits okay) + ``msg``: ciphertext string (digits okay); converted to upper case - ``key``: short string for key (no repetitions, digits okay) + ``key``: short string for key (digits okay). If ``key`` is + less than 36 characters long, the square will be filled with + letters A through Z and digits 0 through 9. All letters are + converted to uppercase. 
OUTPUT: @@ -759,32 +1141,19 @@ def decipher_bifid6(ct, key): ======== >>> from sympy.crypto.crypto import encipher_bifid6, decipher_bifid6 - >>> key = "encrypt" - >>> pt = "meet me on monday at 8am" - >>> encipher_bifid6(pt, key) - 'HNHOKNTA5MEPEGNQZYG' - >>> ct = "HNHOKNTA5MEPEGNQZYG" - >>> decipher_bifid6(ct, key) + >>> key = "gold bug" + >>> encipher_bifid6('meet me on monday at 8am', key) + 'KFKLJJHF5MMMKTFRGPL' + >>> decipher_bifid6(_, key) 'MEETMEONMONDAYAT8AM' """ - A = alphabet_of_cipher() + [str(a) for a in range(10)] - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - ct0 = [x.capitalize() for x in ct if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if not(x in key0)] - n = len(ct0) - # the fractionalization - pairs = flatten([[long_key.index(x)//6, long_key.index(x) % 6] for x in ct0]) - tmp_plain = flatten([[pairs[i], pairs[n + i]] for i in range(n)]) - pt = "".join([long_key[6*tmp_plain[2*i] + tmp_plain[2*i + 1]] for i in range(n)]) - return pt + msg, key, _ = _prep(msg.upper(), key.upper(), None, bifid6) + key = padded_key(key, bifid6) + return decipher_bifid(msg, '', key) -def bifid6_square(key): +def bifid6_square(key=None): r""" 6x6 Polybius square. 
@@ -795,108 +1164,22 @@ def bifid6_square(key): ======== >>> from sympy.crypto.crypto import bifid6_square - >>> key = "encrypt" + >>> key = "gold bug" >>> bifid6_square(key) Matrix([ - [E, N, C, R, Y, P], - [T, A, B, D, F, G], - [H, I, J, K, L, M], - [O, Q, S, U, V, W], - [X, Z, 0, 1, 2, 3], + [G, O, L, D, B, U], + [A, C, E, F, H, I], + [J, K, M, N, P, Q], + [R, S, T, V, W, X], + [Y, Z, 0, 1, 2, 3], [4, 5, 6, 7, 8, 9]]) - """ - A = alphabet_of_cipher() + [str(a) for a in range(10)] - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if not(x in key0)] - f = lambda i, j: Symbol(long_key[6*i + j]) - M = Matrix(6, 6, f) - return M - - -def encipher_bifid7(pt, key): - r""" - Performs the Bifid cipher encryption on plaintext ``pt``, and returns the ciphertext. - - This is the version of the Bifid cipher that uses the `7 \times 7` Polybius square. - Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "22". - (Also, assumes you have some way of distinguishing "22" - from "2", "2" juxtaposed together for deciphering...) 
- - INPUT: - - ``pt``: plaintext string (digits okay) - - ``key``: short string for key (no repetitions, digits okay) - - OUTPUT: - - ciphertext from Bifid7 cipher (all caps, no spaces) - - Examples - ======== - - >>> from sympy.crypto.crypto import encipher_bifid7 - >>> key = "encrypt" - >>> pt = "meet me on monday at 8am" - >>> encipher_bifid7(pt, key) - 'JEJJLNAA3ME19YF3J222R' - - """ - A = alphabet_of_cipher() + [str(a) for a in range(23)] - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - pt0 = [x.capitalize() for x in pt if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if not(x in key0)] - n = len(pt0) - # the fractionalization - pairs = [[long_key.index(x)//7, long_key.index(x) % 7] for x in pt0] - tmp_cipher = flatten([x[0] for x in pairs] + [x[1] for x in pairs]) - ct = "".join([long_key[7*tmp_cipher[2*i] + tmp_cipher[2*i + 1]] for i in range(n)]) - return ct - - -def bifid7_square(key): - r""" - 7x7 Polybius square. - - Produce the Polybius square for the `7 \times 7` Bifid cipher. - Assumes alphabet of symbols is "A", ..., "Z", "0", ..., "22". - (Also, assumes you have some way of distinguishing "22" - from "2", "2" juxtaposed together for deciphering...) 
- - Examples - ======== - - >>> from sympy.crypto.crypto import bifid7_square - >>> bifid7_square("gold bug") - Matrix([ - [ G, O, L, D, B, U, A], - [ C, E, F, H, I, J, K], - [ M, N, P, Q, R, S, T], - [ V, W, X, Y, Z, 0, 1], - [ 2, 3, 4, 5, 6, 7, 8], - [ 9, 10, 11, 12, 13, 14, 15], - [16, 17, 18, 19, 20, 21, 22]]) - - """ - A = alphabet_of_cipher() + [str(a) for a in range(23)] - # first make sure the letters are capitalized - # and text has no spaces - key = uniq(key) - key0 = [x.capitalize() for x in key if x.isalnum()] - # create long key - long_key = key0 + [x for x in A if (not(x in key0))] - f = lambda i, j: Symbol(long_key[7*i + j]) - M = Matrix(7, 7, f) - return M + if not key: + key = bifid6 + else: + _, key, _ = _prep('', key.upper(), None, bifid6) + key = padded_key(key, bifid6) + return bifid_square(key) #################### RSA ############################# @@ -904,34 +1187,36 @@ def bifid7_square(key): def rsa_public_key(p, q, e): r""" - The RSA *public key* is the pair `(n,e)`, where `n` + Return the RSA *public key* pair, `(n, e)`, where `n` is a product of two primes and `e` is relatively - prime (coprime) to the Euler totient `\phi(n)`. + prime (coprime) to the Euler totient `\phi(n)`. False + is returned if any assumption is violated. Examples ======== >>> from sympy.crypto.crypto import rsa_public_key >>> p, q, e = 3, 5, 7 - >>> n, e = rsa_public_key(p, q, e) - >>> n - 15 - >>> e - 7 + >>> rsa_public_key(p, q, e) + (15, 7) + >>> rsa_public_key(p, q, 30) + False """ n = p*q - phi = totient(n) - if isprime(p) and isprime(q) and gcd(e, phi) == 1: - return n, e + if isprime(p) and isprime(q): + phi = totient(n) + if gcd(e, phi) == 1: + return n, e return False def rsa_private_key(p, q, e): r""" - The RSA *private key* is the pair `(n,d)`, where `n` + Return the RSA *private key*, `(n,d)`, where `n` is a product of two primes and `d` is the inverse of - `e` (mod `\phi(n)`). + `e` (mod `\phi(n)`). False is returned if any assumption + is violated. 
Examples ======== @@ -940,20 +1225,23 @@ def rsa_private_key(p, q, e): >>> p, q, e = 3, 5, 7 >>> rsa_private_key(p, q, e) (15, 7) + >>> rsa_private_key(p, q, 30) + False """ n = p*q - phi = totient(n) - if isprime(p) and isprime(q) and gcd(e, phi) == 1: - d = int(invert(e,phi)) - return n, d + if isprime(p) and isprime(q): + phi = totient(n) + if gcd(e, phi) == 1: + d = mod_inverse(e, phi) + return n, d return False -def encipher_rsa(pt, puk): +def encipher_rsa(i, key): """ - In RSA, a message `m` is encrypted by computing - `m^e` (mod `n`), where ``puk`` is the public key `(n,e)`. + Return encryption of ``i`` by computing `i^e` (mod `n`), + where ``key`` is the public key `(n, e)`. Examples ======== @@ -961,19 +1249,19 @@ def encipher_rsa(pt, puk): >>> from sympy.crypto.crypto import encipher_rsa, rsa_public_key >>> p, q, e = 3, 5, 7 >>> puk = rsa_public_key(p, q, e) - >>> pt = 12 - >>> encipher_rsa(pt, puk) + >>> msg = 12 + >>> encipher_rsa(msg, puk) 3 """ - n, e = puk - return pow(pt, e, n) + n, e = key + return pow(i, e, n) -def decipher_rsa(ct, prk): +def decipher_rsa(i, key): """ - In RSA, a ciphertext `c` is decrypted by computing - `c^d` (mod `n`), where ``prk`` is the private key `(n, d)`. + Return decyption of ``i`` by computing `i^d` (mod `n`), + where ``key`` is the private key `(n, d)`. Examples ======== @@ -981,13 +1269,13 @@ def decipher_rsa(ct, prk): >>> from sympy.crypto.crypto import decipher_rsa, rsa_private_key >>> p, q, e = 3, 5, 7 >>> prk = rsa_private_key(p, q, e) - >>> ct = 3 - >>> decipher_rsa(ct, prk) + >>> msg = 3 + >>> decipher_rsa(msg, prk) 12 """ - n, d = prk - return pow(ct, d, n) + n, d = key + return pow(i, d, n) #################### kid krypto (kid RSA) ############################# @@ -1002,15 +1290,16 @@ def kid_rsa_public_key(a, b, A, B): Key generation: * Select positive integers `a, b, A, B` at random. - * Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`, `n = (e d - 1) /M`. 
+ * Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`, + `n = (e d - 1)//M`. * The *public key* is `(n, e)`. Bob sends these to Alice. - * The *private key* is `d`, which Bob keeps secret. + * The *private key* is `(n, d)`, which Bob keeps secret. - Encryption: If `m` is the plaintext message then the - ciphertext is `c = m e \pmod n`. + Encryption: If `p` is the plaintext message then the + ciphertext is `c = p e \pmod n`. Decryption: If `c` is the ciphertext message then the - plaintext is `m = c d \pmod n`. + plaintext is `p = c d \pmod n`. Examples ======== @@ -1021,17 +1310,18 @@ def kid_rsa_public_key(a, b, A, B): (369, 58) """ - M = S(a*b - 1) - e = S(A*M + a) - d = S(B*M + b) - n = S((e*d - 1)//M) + M = a*b - 1 + e = A*M + a + d = B*M + b + n = (e*d - 1)//M return n, e def kid_rsa_private_key(a, b, A, B): """ - Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`, `n = (e d - 1) / M`. - The *private key* is `d`, which Bob keeps secret. + Compute `M = a b - 1`, `e = A M + a`, `d = B M + b`, + `n = (e d - 1) / M`. The *private key* is `d`, which Bob + keeps secret. Examples ======== @@ -1042,61 +1332,93 @@ def kid_rsa_private_key(a, b, A, B): (369, 70) """ - M = S(a*b - 1) - e = S(A*M + a) - d = S(B*M + b) - n = S((e*d - 1)//M) + M = a*b - 1 + e = A*M + a + d = B*M + b + n = (e*d - 1)//M return n, d -def encipher_kid_rsa(pt, puk): +def encipher_kid_rsa(msg, key): """ - Here ``pt`` is the plaintext and ``puk`` is the public key. + Here ``msg`` is the plaintext and ``key`` is the public key. Examples ======== - >>> from sympy.crypto.crypto import encipher_kid_rsa, kid_rsa_public_key - >>> pt = 200 + >>> from sympy.crypto.crypto import ( + ... 
encipher_kid_rsa, kid_rsa_public_key) + >>> msg = 200 >>> a, b, A, B = 3, 4, 5, 6 - >>> pk = kid_rsa_public_key(a, b, A, B) - >>> encipher_kid_rsa(pt, pk) + >>> key = kid_rsa_public_key(a, b, A, B) + >>> encipher_kid_rsa(msg, key) 161 """ - return (pt*puk[1]) % puk[0] + n, e = key + return (msg*e) % n -def decipher_kid_rsa(ct, prk): +def decipher_kid_rsa(msg, key): """ - Here ``pt`` is the plaintext and ``prk`` is the private key. + Here ``msg`` is the plaintext and ``key`` is the private key. Examples ======== - >>> from sympy.crypto.crypto import kid_rsa_public_key, kid_rsa_private_key, decipher_kid_rsa, encipher_kid_rsa + >>> from sympy.crypto.crypto import ( + ... kid_rsa_public_key, kid_rsa_private_key, + ... decipher_kid_rsa, encipher_kid_rsa) >>> a, b, A, B = 3, 4, 5, 6 >>> d = kid_rsa_private_key(a, b, A, B) - >>> pt = 200 - >>> pk = kid_rsa_public_key(a, b, A, B) - >>> prk = kid_rsa_private_key(a, b, A, B) - >>> ct = encipher_kid_rsa(pt, pk) - >>> decipher_kid_rsa(ct, prk) + >>> msg = 200 + >>> pub = kid_rsa_public_key(a, b, A, B) + >>> pri = kid_rsa_private_key(a, b, A, B) + >>> ct = encipher_kid_rsa(msg, pub) + >>> decipher_kid_rsa(ct, pri) 200 """ - n = prk[0] - d = prk[1] - return (ct*d) % n + n, d = key + return (msg*d) % n #################### Morse Code ###################################### - -def encode_morse(pt): +morse_char = { + ".-": "A", "-...": "B", + "-.-.": "C", "-..": "D", + ".": "E", "..-.": "F", + "--.": "G", "....": "H", + "..": "I", ".---": "J", + "-.-": "K", ".-..": "L", + "--": "M", "-.": "N", + "---": "O", ".--.": "P", + "--.-": "Q", ".-.": "R", + "...": "S", "-": "T", + "..-": "U", "...-": "V", + ".--": "W", "-..-": "X", + "-.--": "Y", "--..": "Z", + "-----": "0", "----": "1", + "..---": "2", "...--": "3", + "....-": "4", ".....": "5", + "-....": "6", "--...": "7", + "---..": "8", "----.": "9", + ".-.-.-": ".", "--..--": ",", + "---...": ":", "-.-.-.": ";", + "..--..": "?", "-...-": "-", + "..--.-": "_", "-.--.": "(", + "-.--.-": 
")", ".----.": "'", + "-...-": "=", ".-.-.": "+", + "-..-.": "/", ".--.-.": "@", + "...-..-": "$", "-.-.--": "!"} +char_morse = dict([(v, k) for k, v in morse_char.items()]) + + +def encode_morse(msg, sep='|', mapping=None): """ - Encodes a plaintext into popular Morse Code with letters separated by "|" - and words by "||". + Encodes a plaintext into popular Morse Code with letters + separated by `sep` and words by a double `sep`. References ========== @@ -1107,64 +1429,44 @@ def encode_morse(pt): ======== >>> from sympy.crypto.crypto import encode_morse - >>> pt = 'ATTACK THE RIGHT FLANK' - >>> encode_morse(pt) - '.-|-|-|.-|-.-.|-.-||-|....|.||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-' - - """ - - morse_encoding_map = {"A": ".-", "B": "-...", - "C": "-.-.", "D": "-..", - "E": ".", "F": "..-.", - "G": "--.", "H": "....", - "I": "..", "J": ".---", - "K": "-.-", "L": ".-..", - "M": "--", "N": "-.", - "O": "---", "P": ".--.", - "Q": "--.-", "R": ".-.", - "S": "...", "T": "-", - "U": "..-", "V": "...-", - "W": ".--", "X": "-..-", - "Y": "-.--", "Z": "--..", - "0": "-----", "1": ".----", - "2": "..---", "3": "...--", - "4": "....-", "5": ".....", - "6": "-....", "7": "--...", - "8": "---..", "9": "----.", - ".": ".-.-.-", ",": "--..--", - ":": "---...", ";": "-.-.-.", - "?": "..--..", "-": "-...-", - "_": "..--.-", "(": "-.--.", - ")": "-.--.-", "'": ".----.", - "=": "-...-", "+": ".-.-.", - "/": "-..-.", "@": ".--.-.", - "$": "...-..-", "!": "-.-.--" } - - unusable_chars = "\"#%&*<>[\]^`{|}~" - morsestring = [] + >>> msg = 'ATTACK RIGHT FLANK' + >>> encode_morse(msg) + '.-|-|-|.-|-.-.|-.-||.-.|..|--.|....|-||..-.|.-..|.-|-.|-.-' + + """ + + mapping = mapping or char_morse + assert sep not in mapping + word_sep = 2*sep + mapping[" "] = word_sep + suffix = msg and msg[-1] in whitespace - for i in unusable_chars: - pt = pt.replace(i, "") - pt = pt.upper() + # normalize whitespace + msg = (' ' if word_sep else '').join(msg.split()) + # omit unmapped chars + chars = 
set(''.join(msg.split())) + ok = set(mapping.keys()) + msg = translate(msg, None, ''.join(chars - ok)) - words = pt.split(" ") + morsestring = [] + words = msg.split() for word in words: - letters = list(word) morseword = [] - for letter in letters: - morseletter = morse_encoding_map[letter] + for letter in word: + morseletter = mapping[letter] morseword.append(morseletter) - word = "|".join(morseword) + word = sep.join(morseword) morsestring.append(word) - return "||".join(morsestring) + return word_sep.join(morsestring) + (word_sep if suffix else '') -def decode_morse(mc): +def decode_morse(msg, sep='|', mapping=None): """ - Decodes a Morse Code with letters separated by "|" - and words by "||" into plaintext. + Decodes a Morse Code with letters separated by `sep` + (default is '|') and words by `word_sep` (default is '||) + into plaintext. References ========== @@ -1181,51 +1483,17 @@ def decode_morse(mc): """ - morse_decoding_map = {".-": "A", "-...": "B", - "-.-.": "C", "-..": "D", - ".": "E", "..-.": "F", - "--.": "G", "....": "H", - "..": "I", ".---": "J", - "-.-": "K", ".-..": "L", - "--": "M", "-.": "N", - "---": "O", ".--.": "P", - "--.-": "Q", ".-.": "R", - "...": "S", "-": "T", - "..-": "U", "...-": "V", - ".--": "W", "-..-": "X", - "-.--": "Y", "--..": "Z", - "-----": "0", "----": "1", - "..---": "2", "...--": "3", - "....-": "4", ".....": "5", - "-....": "6", "--...": "7", - "---..": "8", "----.": "9", - ".-.-.-": ".", "--..--": ",", - "---...": ":", "-.-.-.": ";", - "..--..": "?", "-...-": "-", - "..--.-": "_", "-.--.": "(", - "-.--.-": ")", ".----.": "'", - "-...-": "=", ".-.-.": "+", - "-..-.": "/", ".--.-.": "@", - "...-..-": "$", "-.-.--": "!"} - + mapping = mapping or morse_char + word_sep = 2*sep characterstring = [] - - if mc[-1] == "|" and mc[-2] == "|": - mc = mc[:-2] - words = mc.split("||") + words = msg.strip(word_sep).split(word_sep) for word in words: - letters = word.split("|") - characterword = [] - for letter in letters: - try: - 
characterletter = morse_decoding_map[letter] - except KeyError: - return "Invalid Morse Code" - characterword.append(characterletter) - - word = "".join(characterword) + letters = word.split(sep) + chars = [mapping[c] for c in letters] + word = ''.join(chars) characterstring.append(word) - return " ".join(characterstring) + rv = " ".join(characterstring) + return rv #################### LFSRs ########################################## @@ -1248,7 +1516,8 @@ def lfsr_sequence(key, fill, n): OUTPUT: - The lfsr sequence defined by `x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for + The lfsr sequence defined by + `x_{n+1} = c_k x_n + \ldots + c_0 x_{n-k}`, for `n \leq k`. Notes @@ -1295,7 +1564,8 @@ def lfsr_sequence(key, fill, n): References ========== - .. [G] Solomon Golomb, Shift register sequences, Aegean Park Press, Laguna Hills, Ca, 1967 + .. [G] Solomon Golomb, Shift register sequences, Aegean Park Press, + Laguna Hills, Ca, 1967 Examples ======== @@ -1306,7 +1576,8 @@ def lfsr_sequence(key, fill, n): >>> fill = [F(1), F(1), F(0), F(1)] >>> key = [F(1), F(0), F(0), F(1)] >>> lfsr_sequence(key, fill, 10) - [1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2] + [1 mod 2, 1 mod 2, 0 mod 2, 1 mod 2, 0 mod 2, + 1 mod 2, 1 mod 2, 0 mod 2, 0 mod 2, 1 mod 2] """ if not isinstance(key, list): @@ -1329,7 +1600,7 @@ def lfsr_sequence(key, fill, n): def lfsr_autocorrelation(L, P, k): """ - This function computes the lsfr autocorrelation function. + This function computes the LFSR autocorrelation function. INPUT: @@ -1347,7 +1618,8 @@ def lfsr_autocorrelation(L, P, k): Examples ======== - >>> from sympy.crypto.crypto import lfsr_sequence, lfsr_autocorrelation + >>> from sympy.crypto.crypto import ( + ... 
lfsr_sequence, lfsr_autocorrelation) >>> from sympy.polys.domains import FF >>> F = FF(2) >>> fill = [F(1), F(1), F(0), F(1)] @@ -1372,28 +1644,33 @@ def lfsr_autocorrelation(L, P, k): def lfsr_connection_polynomial(s): """ - This function computes the lsfr connection polynomial. + This function computes the LFSR connection polynomial. INPUT: - ``s``: a sequence of elements of even length, with entries in a finite field + ``s``: a sequence of elements of even length, with entries in + a finite field OUTPUT: - ``C(x)``: the connection polynomial of a minimal LFSR yielding ``s``. + ``C(x)``: the connection polynomial of a minimal LFSR yielding + ``s``. - This implements the algorithm in section 3 of J. L. Massey's article [M]_. + This implements the algorithm in section 3 of J. L. Massey's + article [M]_. References ========== .. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding." - IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127, Jan 1969. + IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127, + Jan 1969. Examples ======== - >>> from sympy.crypto.crypto import lfsr_sequence, lfsr_connection_polynomial + >>> from sympy.crypto.crypto import ( + ... 
lfsr_sequence, lfsr_connection_polynomial) >>> from sympy.polys.domains import FF >>> F = FF(2) >>> fill = [F(1), F(1), F(0), F(1)] @@ -1432,8 +1709,10 @@ def lfsr_connection_polynomial(s): if L > 0: dC = Poly(C).degree() r = min(L + 1, dC + 1) - coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)] - d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int() for i in range(1, r)])) % p + coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) + for i in range(1, dC + 1)] + d = (s[N].to_int() + sum([coeffsC[i]*s[N - i].to_int() + for i in range(1, r)])) % p if L == 0: d = s[N].to_int()*x**0 if d == 0: @@ -1454,13 +1733,14 @@ def lfsr_connection_polynomial(s): N += 1 dC = Poly(C).degree() coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)] - return sum([coeffsC[i] % p*x**i for i in range(dC + 1) if coeffsC[i] is not None]) + return sum([coeffsC[i] % p*x**i for i in range(dC + 1) + if coeffsC[i] is not None]) #################### ElGamal ############################# -def elgamal_private_key(digit=10): +def elgamal_private_key(digit=10, seed=None): """ Return three number tuple as private key. @@ -1469,20 +1749,25 @@ def elgamal_private_key(digit=10): `a^{b} \equiv c \pmod p` - In general, if a and b are known, c is easily - calculated. If b is unknown, it is hard to use - a and c to get b. + In general, if ``a`` and ``b`` are known, ``ct`` is easily + calculated. If ``b`` is unknown, it is hard to use + ``a`` and ``ct`` to get ``b``. Parameters ========== - digit : Key length in binary + digit : minimum number of binary digits for key Returns ======= (p, r, d) : p = prime number, r = primitive root, d = random number + Notes + ===== + + For testing purposes, the ``seed`` parameter may be set to control + the output of this routine. See sympy.utilities.randtest._randrange. 
Examples ======== @@ -1496,18 +1781,19 @@ def elgamal_private_key(digit=10): True """ + randrange = _randrange(seed) p = nextprime(2**digit) return p, primitive_root(p), randrange(2, p) -def elgamal_public_key(prk): +def elgamal_public_key(key): """ Return three number tuple as public key. Parameters ========== - prk : Tuple (p, r, e) generated by ``elgamal_private_key`` + key : Tuple (p, r, e) generated by ``elgamal_private_key`` Returns ======= @@ -1521,17 +1807,19 @@ def elgamal_public_key(prk): (1031, 14, 212) """ - return prk[0], prk[1], pow(prk[1], prk[2], prk[0]) + p, r, e = key + return p, r, pow(r, e, p) -def encipher_elgamal(m, puk): +def encipher_elgamal(i, key, seed=None): """ Encrypt message with public key - m is plain text message in int. puk is - public key (p, r, e). In order to encrypt - a message, a random number ``a`` between ``2`` and ``p``, - encryped message is `c_{1}` and `c_{2}` + ``i`` is a plaintext message expressed as an integer. + ``key`` is public key (p, r, e). In order to encrypt + a message, a random number ``a`` in ``range(2, p)`` + is generated and the encryped message is returned as + `c_{1}` and `c_{2}` where: `c_{1} \equiv r^{a} \pmod p` @@ -1540,35 +1828,49 @@ def encipher_elgamal(m, puk): Parameters ========== - m : int of encoded message - puk : public key + msg : int of encoded message + key : public key Returns ======= (c1, c2) : Encipher into two number + Notes + ===== + + For testing purposes, the ``seed`` parameter may be set to control + the output of this routine. See sympy.utilities.randtest._randrange. 
+ Examples ======== - >>> from sympy.crypto.crypto import encipher_elgamal - >>> encipher_elgamal(100, (1031, 14, 212)) # doctest: +SKIP - (835, 271) + >>> from sympy.crypto.crypto import encipher_elgamal, elgamal_private_key, elgamal_public_key + >>> pri = elgamal_private_key(5, seed=[3]); pri + (37, 2, 3) + >>> pub = elgamal_public_key(pri); pub + (37, 2, 8) + >>> msg = 36 + >>> encipher_elgamal(msg, pub, seed=[3]) + (8, 6) """ - if m > puk[0]: - raise ValueError('Message {} should be less than prime {}'.format(m, puk[0])) - r = randrange(2, puk[0]) - return pow(puk[1], r, puk[0]), m * pow(puk[2], r, puk[0]) % puk[0] + p, r, e = key + if i < 0 or i >= p: + raise ValueError( + 'Message (%s) should be in range(%s)' % (i, p)) + randrange = _randrange(seed) + a = randrange(2, p) + return pow(r, a, p), i*pow(e, a, p) % p -def decipher_elgamal(ct, prk): +def decipher_elgamal(msg, key): r""" Decrypt message with private key - `ct = (c_{1}, c_{2})` + `msg = (c_{1}, c_{2})` - `prk = (p, r, d)` + `key = (p, r, d)` According to extended Eucliden theorem, `u c_{1}^{d} + p n = 1` @@ -1583,45 +1885,62 @@ def decipher_elgamal(ct, prk): ======== >>> from sympy.crypto.crypto import decipher_elgamal - >>> decipher_elgamal((835, 271), (1031, 14, 636)) - 100 + >>> from sympy.crypto.crypto import encipher_elgamal + >>> from sympy.crypto.crypto import elgamal_private_key + >>> from sympy.crypto.crypto import elgamal_public_key + + >>> pri = elgamal_private_key(5, seed=[3]) + >>> pub = elgamal_public_key(pri); pub + (37, 2, 8) + >>> msg = 17 + >>> decipher_elgamal(encipher_elgamal(msg, pub), pri) == msg + True """ - u = igcdex(ct[0] ** prk[2], prk[0])[0] - return u * ct[1] % prk[0] + p, r, d = key + c1, c2 = msg + u = igcdex(c1**d, p)[0] + return u * c2 % p -#################### Diffie-Hellman Key Exchange ############################# +################ Diffie-Hellman Key Exchange ######################### -def dh_private_key(digit = 10): +def dh_private_key(digit=10, seed=None): """ - 
Return two number tuple as private key. + Return three integer tuple as private key. Diffie-Hellman key exchange is based on the mathematical problem called the Discrete Logarithm Problem (see ElGamal). Diffie-Hellman key exchange is divided into the following steps: - * Alice and Bob agree on a base that consist of a prime p and a - primitive root of p called g - * Alice choses a number a and Bob choses a number b where a - and b are random numbers with 1 < a, b < p. These are their - private keys. + * Alice and Bob agree on a base that consist of a prime ``p`` + and a primitive root of ``p`` called ``g`` + * Alice choses a number ``a`` and Bob choses a number ``b`` where + ``a`` and ``b`` are random numbers in range `[2, p)`. These are + their private keys. * Alice then publicly sends Bob `g^{a} \pmod p` while Bob sends Alice `g^{b} \pmod p` - * They both raise the received value to their secretly chose number - (a or b) and now have both as their shared key `g^{ab} \pmod p` + * They both raise the received value to their secretly chosen + number (``a`` or ``b``) and now have both as their shared key + `g^{ab} \pmod p` Parameters ========== - digit: Key length in binary + digit: minimum number of binary digits required in key Returns ======= (p, g, a) : p = prime number, g = primitive root of p, - a = random number in between 2 and p - 1 + a = random number from 2 thru p - 1 + + Notes + ===== + + For testing purposes, the ``seed`` parameter may be set to control + the output of this routine. See sympy.utilities.randtest._randrange. Examples ======== @@ -1640,21 +1959,23 @@ def dh_private_key(digit = 10): True """ - p = nextprime(2 ** digit) + p = nextprime(2**digit) g = primitive_root(p) + randrange = _randrange(seed) a = randrange(2, p) return p, g, a -def dh_public_key(prk): + +def dh_public_key(key): """ - Return two number tuple as public key. + Return three number tuple as public key. This is the tuple that Alice sends to Bob. 
Parameters ========== - prk: Tuple (p, g, a) generated by ``dh_private_key`` + key: Tuple (p, g, a) generated by ``dh_private_key`` Returns ======= @@ -1673,12 +1994,13 @@ def dh_public_key(prk): True """ - p, g, a = prk + p, g, a = key return p, g, pow(g, a, p) -def dh_shared_key(puk, b): + +def dh_shared_key(key, b): """ - Return int as shared key. + Return an integer that is the shared key. This is what Bob and Alice can both calculate using the public keys they received from each other and their private keys. @@ -1686,19 +2008,20 @@ def dh_shared_key(puk, b): Parameters ========== - puk: Tuple (p, g, x) generated by ``dh_public_key`` + key: Tuple (p, g, x) generated by ``dh_public_key`` b: Random number in the range of 2 to p - 1 (Chosen by second key exchange member (Bob)) Returns ======= - sk: int as shared key + shared key (int) Examples ======== - >>> from sympy.crypto.crypto import dh_private_key, dh_public_key, dh_shared_key + >>> from sympy.crypto.crypto import ( + ... dh_private_key, dh_public_key, dh_shared_key) >>> prk = dh_private_key(); >>> p, g, x = dh_public_key(prk); >>> sk = dh_shared_key((p, g, x), 1000) @@ -1706,8 +2029,10 @@ def dh_shared_key(puk, b): True """ - p, _, x = puk + p, _, x = key if 1 >= b or b >= p: - raise ValueError('Value of b should be greater 1 and less than prime {}'\ - .format(p)) + raise ValueError(filldedent(''' + Value of b should be greater 1 and less + than prime %s.''' % p)) + return pow(x, b, p) diff --git a/sympy/utilities/misc.py b/sympy/utilities/misc.py index 5d0921f8da..2bb228cc06 100644 --- a/sympy/utilities/misc.py +++ b/sympy/utilities/misc.py @@ -4,10 +4,12 @@ import sys import os +import re as _re from textwrap import fill, dedent from sympy.core.compatibility import get_function_name, range + def filldedent(s, w=70): """ Strips leading and trailing empty lines from a copy of `s`, then dedents, @@ -217,5 +219,183 @@ def find_executable(executable, path=None): def func_name(x): - '''return function name of 
`x` (if defined) else the `type(x)`.''' + '''Return function name of `x` (if defined) else the `type(x)`. + See Also + ======== + sympy.core.compatibility get_function_name + ''' return getattr(getattr(x, 'func', x), '__name__', type(x)) + + +def _replace(reps): + """Return a function that can make the replacements, given in + ``reps``, on a string. The replacements should be given as mapping. + + Examples + ======== + + >>> from sympy.utilities.misc import _replace + >>> f = _replace(dict(foo='bar', d='t')) + >>> f('food') + 'bart' + >>> f = _replace({}) + >>> f('food') + 'food' + """ + if not reps: + return lambda x: x + D = lambda match: reps[match.group(0)] + pattern = _re.compile("|".join( + [_re.escape(k) for k, v in reps.items()]), _re.M) + return lambda string: pattern.sub(D, string) + + +def replace(string, *reps): + """Return ``string`` with all keys in ``reps`` replaced with + their corresponding values, longer strings first, irrespective + of the order they are given. ``reps`` may be passed as tuples + or a single mapping. + + Examples + ======== + + >>> from sympy.utilities.misc import replace + >>> replace('foo', {'oo': 'ar', 'f': 'b'}) + 'bar' + >>> replace("spamham sha", ("spam", "eggs"), ("sha","md5")) + 'eggsham md5' + + There is no guarantee that a unique answer will be + obtained if keys in a mapping overlap (i.e. are the same + length and have some identical sequence at the + beginning/end): + + >>> reps = [ + ... ('ab', 'x'), + ... ('bc', 'y')] + >>> replace('abc', *reps) in ('xc', 'ay') + True + + References + ========== + + .. [1] http://stackoverflow.com/questions/6116978/python-replace-multiple-strings + """ + if len(reps) == 1: + kv = reps[0] + if type(kv) is dict: + reps = kv + else: + return string.replace(*kv) + else: + reps = dict(reps) + return _replace(reps)(string) + + +def translate(s, a, b=None, c=None): + """Return ``s`` where characters have been replaced or deleted. 
+ + SYNTAX + ====== + + translate(s, None, deletechars): + all characters in ``deletechars`` are deleted + translate(s, map [,deletechars]): + all characters in ``deletechars`` (if provided) are deleted + then the replacements defined by map are made; if the keys + of map are strings then the longer ones are handled first. + Multicharacter deletions should have a value of ''. + translate(s, oldchars, newchars, deletechars) + all characters in ``deletechars`` are deleted + then each character in ``oldchars`` is replaced with the + corresponding character in ``newchars`` + + Examples + ======== + + >>> from sympy.utilities.misc import translate + >>> from sympy.core.compatibility import unichr + >>> abc = 'abc' + >>> translate(abc, None, 'a') + 'bc' + >>> translate(abc, {'a': 'x'}, 'c') + 'xb' + >>> translate(abc, {'abc': 'x', 'a': 'y'}) + 'x' + + >>> translate('abcd', 'ac', 'AC', 'd') + 'AbC' + + There is no guarantee that a unique answer will be + obtained if keys in a mapping overlap are the same + length and have some identical sequences at the + beginning/end: + + >>> translate(abc, {'ab': 'x', 'bc': 'y'}) in ('xc', 'ay') + True + """ + from sympy.core.compatibility import maketrans + + # when support for Python 2 is dropped, this try/except can be + #removed + try: + ''.translate(None, '') + py3 = False + except TypeError: + py3 = True + + mr = {} + if a is None: + assert c is None + if not b: + return s + c = b + a = b = '' + else: + if type(a) is dict: + short = {} + for k in list(a.keys()): + if (len(k) == 1 and len(a[k]) == 1): + short[k] = a.pop(k) + mr = a + c = b + if short: + a, b = [''.join(i) for i in list(zip(*short.items()))] + else: + a = b = '' + else: + assert len(a) == len(b) + if py3: + if c: + s = s.translate(maketrans('', '', c)) + s = replace(s, mr) + return s.translate(maketrans(a, b)) + else: + # when support for Python 2 is dropped, this if-else-block + # can be replaced with the if-clause + if c: + c = list(c) + rem = {} + for i in 
range(-1, -1 - len(c), -1): + if ord(c[i]) > 255: + rem[c[i]] = '' + c.pop(i) + s = s.translate(None, ''.join(c)) + s = replace(s, rem) + if a: + a = list(a) + b = list(b) + for i in range(-1, -1 - len(a), -1): + if ord(a[i]) > 255 or ord(b[i]) > 255: + mr[a.pop(i)] = b.pop(i) + a = ''.join(a) + b = ''.join(b) + s = replace(s, mr) + table = maketrans(a, b) + # s may have become unicode which uses the py3 syntax for translate + if type(table) is str and type(s) is str: + s = s.translate(table) + else: + s = s.translate(dict( + [(i, ord(c)) for i, c in enumerate(table)])) + return s
Extend vigenère in crypto to accept repetitions in keys In line 369 of function `encipher_vigenere`, which is part of `crypto.py` the method `uniq` from utilities.iterables is used to return an iterator of the key-phrase without possible spaces in the key. However, `uniq` will return an iterator with only unique characters of the key. Therefore, if one uses a key such as "cool", `uniq` returns "col" and the function will hence encrypt in a wrong way. Example: `encipher_vigenere("wow", "cool")` returns "YCH". However, the right result would actually be "YCK". Also, instead of using `capitalize` on each character of the key or the plaintext (see lines 370 or 373) we could also simplify the problem by using `key.upper` or `pt.upper`. I would volunteer to fix this and if I am assuming right and `uniq` is used wrong also in other crypto functions such as `encipher_bifid5` or `bifid5_square` I could also fix these functions.
sympy/sympy
diff --git a/sympy/crypto/tests/test_crypto.py b/sympy/crypto/tests/test_crypto.py index e6af68682f..333e68a0a3 100644 --- a/sympy/crypto/tests/test_crypto.py +++ b/sympy/crypto/tests/test_crypto.py @@ -1,26 +1,25 @@ from sympy.core import symbols from sympy.core.compatibility import range -from sympy.crypto.crypto import (alphabet_of_cipher, cycle_list, +from sympy.crypto.crypto import (cycle_list, encipher_shift, encipher_affine, encipher_substitution, - encipher_vigenere, decipher_vigenere, + check_and_join, encipher_vigenere, decipher_vigenere, encipher_hill, decipher_hill, encipher_bifid5, encipher_bifid6, - bifid5_square, bifid6_square, bifid7_square, - encipher_bifid7, decipher_bifid5, decipher_bifid6, encipher_kid_rsa, + bifid5_square, bifid6_square, bifid5, bifid6, bifid10, + decipher_bifid5, decipher_bifid6, encipher_kid_rsa, decipher_kid_rsa, kid_rsa_private_key, kid_rsa_public_key, decipher_rsa, rsa_private_key, rsa_public_key, encipher_rsa, lfsr_connection_polynomial, lfsr_autocorrelation, lfsr_sequence, encode_morse, decode_morse, elgamal_private_key, elgamal_public_key, encipher_elgamal, decipher_elgamal, dh_private_key, dh_public_key, - dh_shared_key) + dh_shared_key, decipher_shift, decipher_affine, encipher_bifid, + decipher_bifid, bifid_square, padded_key, uniq) from sympy.matrices import Matrix from sympy.ntheory import isprime, is_primitive_root from sympy.polys.domains import FF -from sympy.utilities.pytest import raises -from random import randrange -def test_alphabet_of_cipher(): - assert alphabet_of_cipher()[0] == "A" - assert alphabet_of_cipher(symbols="1z") == ["1", "z"] +from sympy.utilities.pytest import raises, slow + +from random import randrange def test_cycle_list(): @@ -33,6 +32,7 @@ def test_encipher_shift(): assert encipher_shift("ABC", 0) == "ABC" assert encipher_shift("ABC", 1) == "BCD" assert encipher_shift("ABC", -1) == "ZAB" + assert decipher_shift("ZAB", -1) == "ABC" def test_encipher_affine(): @@ -41,11 +41,21 @@ def 
test_encipher_affine(): assert encipher_affine("ABC", (-1, 0)) == "AZY" assert encipher_affine("ABC", (-1, 1), symbols="ABCD") == "BAD" assert encipher_affine("123", (-1, 1), symbols="1234") == "214" + assert encipher_affine("ABC", (3, 16)) == "QTW" + assert decipher_affine("QTW", (3, 16)) == "ABC" def test_encipher_substitution(): - assert encipher_substitution("ABC", "BAC", symbols="ABC") == "BAC" - assert encipher_substitution("123", "124", symbols="1234") == "124" + assert encipher_substitution("ABC", "BAC", "ABC") == "BAC" + assert encipher_substitution("123", "1243", "1234") == "124" + + +def test_check_and_join(): + assert check_and_join("abc") == "abc" + assert check_and_join(uniq("aaabc")) == "abc" + assert check_and_join("ab c".split()) == "abc" + assert check_and_join("abc", "a", filter=True) == "a" + raises(ValueError, lambda: check_and_join('ab', 'a')) def test_encipher_vigenere(): @@ -73,8 +83,10 @@ def test_encipher_hill(): A = Matrix(2, 2, [1, 2, 3, 5]) assert encipher_hill("ABCD", A, symbols="ABCD") == "CBAB" assert encipher_hill("AB", A, symbols="ABCD") == "CB" - # n does not need to be a multiple of k - assert encipher_hill("ABA", A) == "CFAA" + # message length, n, does not need to be a multiple of k; + # it is padded + assert encipher_hill("ABA", A) == "CFGC" + assert encipher_hill("ABA", A, pad="Z") == "CFYV" def test_decipher_hill(): @@ -98,8 +110,7 @@ def test_encipher_bifid5(): def test_bifid5_square(): - A = alphabet_of_cipher() - A.remove("J") + A = bifid5 f = lambda i, j: symbols(A[5*i + j]) M = Matrix(5, 5, f) assert bifid5_square("") == M @@ -112,20 +123,6 @@ def test_decipher_bifid5(): assert decipher_bifid5("b ac", "b") == "ABC" -def test_bifid7_square(): - A = alphabet_of_cipher() + [str(a) for a in range(23)] - f = lambda i, j: symbols(A[7*i + j]) - M = Matrix(7, 7, f) - assert bifid7_square("") == M - - -def test_encipher_bifid7(): - assert encipher_bifid7("AB", "AB") == "AB" - assert encipher_bifid7("AB", "CD") == "CR" - assert 
encipher_bifid7("ab", "c") == "CJ" - assert encipher_bifid7("a bc", "b") == "BAC" - - def test_encipher_bifid6(): assert encipher_bifid6("AB", "AB") == "AB" assert encipher_bifid6("AB", "CD") == "CP" @@ -141,7 +138,7 @@ def test_decipher_bifid6(): def test_bifid6_square(): - A = alphabet_of_cipher() + [str(a) for a in range(10)] + A = bifid6 f = lambda i, j: symbols(A[6*i + j]) M = Matrix(6, 6, f) assert bifid6_square("") == M @@ -207,13 +204,18 @@ def test_decipher_kid_rsa(): def test_encode_morse(): assert encode_morse('ABC') == '.-|-...|-.-.' assert encode_morse('SMS ') == '...|--|...||' + assert encode_morse('SMS\n') == '...|--|...||' + assert encode_morse('') == '' + assert encode_morse(' ') == '||' + assert encode_morse(' ', sep='`') == '``' + assert encode_morse(' ', sep='``') == '````' assert encode_morse('!@#$%^&*()_+') == '-.-.--|.--.-.|...-..-|-.--.|-.--.-|..--.-|.-.-.' def test_decode_morse(): assert decode_morse('-.-|.|-.--') == 'KEY' assert decode_morse('.-.|..-|-.||') == 'RUN' - assert decode_morse('.....----') == 'Invalid Morse Code' + raises(KeyError, lambda: decode_morse('.....----')) def test_lfsr_sequence(): @@ -244,18 +246,22 @@ def test_lfsr_connection_polynomial(): s = lfsr_sequence([F(1), F(1)], [F(0), F(1)], 5) assert lfsr_connection_polynomial(s) == x**2 + x + 1 + def test_elgamal_private_key(): a, b, _ = elgamal_private_key(digit=100) assert isprime(a) assert is_primitive_root(b, a) assert len(bin(a)) >= 102 + def test_elgamal(): - dk = elgamal_private_key(20) + dk = elgamal_private_key(5) ek = elgamal_public_key(dk) - m = 12345 - assert m == decipher_elgamal(encipher_elgamal(m, ek), dk) - raises(ValueError, lambda: encipher_elgamal(2000, (1031, 14, 212))) + P = ek[0] + assert P - 1 == decipher_elgamal(encipher_elgamal(P - 1, ek), dk) + raises(ValueError, lambda: encipher_elgamal(P, dk)) + raises(ValueError, lambda: encipher_elgamal(-1, dk)) + def test_dh_private_key(): p, g, _ = dh_private_key(digit = 100) @@ -263,6 +269,7 @@ def 
test_dh_private_key(): assert is_primitive_root(g, p) assert len(bin(p)) >= 102 + def test_dh_public_key(): p1, g1, a = dh_private_key(digit = 100) p2, g2, ga = dh_public_key((p1, g1, a)) @@ -270,6 +277,7 @@ def test_dh_public_key(): assert g1 == g2 assert ga == pow(g1, a, p1) + def test_dh_shared_key(): prk = dh_private_key(digit = 100) p, _, ga = dh_public_key(prk) @@ -277,3 +285,21 @@ def test_dh_shared_key(): sk = dh_shared_key((p, _, ga), b) assert sk == pow(ga, b, p) raises(ValueError, lambda: dh_shared_key((1031, 14, 565), 2000)) + + +def test_padded_key(): + assert padded_key('b', 'ab') == 'ba' + raises(ValueError, lambda: padded_key('ab', 'ace')) + raises(ValueError, lambda: padded_key('ab', 'abba')) + + +def test_bifid(): + raises(ValueError, lambda: encipher_bifid('abc', 'b', 'abcde')) + assert encipher_bifid('abc', 'b', 'abcd') == 'bdb' + raises(ValueError, lambda: decipher_bifid('bdb', 'b', 'abcde')) + assert encipher_bifid('bdb', 'b', 'abcd') == 'abc' + raises(ValueError, lambda: bifid_square('abcde')) + assert bifid5_square("B") == \ + bifid5_square('BACDEFGHIKLMNOPQRSTUVWXYZ') + assert bifid6_square('B0') == \ + bifid6_square('B0ACDEFGHIJKLMNOPQRSTUVWXYZ123456789') diff --git a/sympy/utilities/tests/test_misc.py b/sympy/utilities/tests/test_misc.py new file mode 100644 index 0000000000..14df99ab74 --- /dev/null +++ b/sympy/utilities/tests/test_misc.py @@ -0,0 +1,22 @@ +from sympy.core.compatibility import unichr +from sympy.utilities.misc import translate, replace + +def test_translate(): + abc = 'abc' + translate(abc, None, 'a') == 'bc' + translate(abc, None, '') == 'abc' + translate(abc, {'a': 'x'}, 'c') == 'xb' + assert translate(abc, {'a': 'bc'}, 'c') == 'bcb' + assert translate(abc, {'ab': 'x'}, 'c') == 'x' + assert translate(abc, {'ab': ''}, 'c') == '' + assert translate(abc, {'bc': 'x'}, 'c') == 'ab' + assert translate(abc, {'abc': 'x', 'a': 'y'}) == 'x' + u = unichr(4096) + assert translate(abc, 'a', 'x', u) == 'xbc' + assert (u in 
translate(abc, 'a', u, u)) is True + + +def test_replace(): + assert replace('abc', ('a', 'b')) == 'bbc' + assert replace('abc', {'a': 'Aa'}) == 'Aabc' + assert replace('abc', ('a', 'b'), ('c', 'C')) == 'bbC'
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 5 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@0e0aec4a3a03649085d2aea8ec17ae94691f0770#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/crypto/tests/test_crypto.py::test_cycle_list", "sympy/crypto/tests/test_crypto.py::test_encipher_shift", "sympy/crypto/tests/test_crypto.py::test_encipher_affine", "sympy/crypto/tests/test_crypto.py::test_encipher_substitution", "sympy/crypto/tests/test_crypto.py::test_check_and_join", "sympy/crypto/tests/test_crypto.py::test_encipher_vigenere", "sympy/crypto/tests/test_crypto.py::test_decipher_vigenere", "sympy/crypto/tests/test_crypto.py::test_encipher_hill", "sympy/crypto/tests/test_crypto.py::test_decipher_hill", "sympy/crypto/tests/test_crypto.py::test_encipher_bifid5", "sympy/crypto/tests/test_crypto.py::test_bifid5_square", "sympy/crypto/tests/test_crypto.py::test_decipher_bifid5", "sympy/crypto/tests/test_crypto.py::test_encipher_bifid6", "sympy/crypto/tests/test_crypto.py::test_decipher_bifid6", "sympy/crypto/tests/test_crypto.py::test_bifid6_square", "sympy/crypto/tests/test_crypto.py::test_rsa_public_key", "sympy/crypto/tests/test_crypto.py::test_rsa_private_key", "sympy/crypto/tests/test_crypto.py::test_encipher_rsa", "sympy/crypto/tests/test_crypto.py::test_decipher_rsa", "sympy/crypto/tests/test_crypto.py::test_kid_rsa_public_key", "sympy/crypto/tests/test_crypto.py::test_kid_rsa_private_key", "sympy/crypto/tests/test_crypto.py::test_encipher_kid_rsa", "sympy/crypto/tests/test_crypto.py::test_decipher_kid_rsa", "sympy/crypto/tests/test_crypto.py::test_encode_morse", "sympy/crypto/tests/test_crypto.py::test_decode_morse", "sympy/crypto/tests/test_crypto.py::test_lfsr_sequence", "sympy/crypto/tests/test_crypto.py::test_lfsr_autocorrelation", "sympy/crypto/tests/test_crypto.py::test_lfsr_connection_polynomial", "sympy/crypto/tests/test_crypto.py::test_elgamal_private_key", "sympy/crypto/tests/test_crypto.py::test_elgamal", "sympy/crypto/tests/test_crypto.py::test_dh_private_key", "sympy/crypto/tests/test_crypto.py::test_dh_public_key", "sympy/crypto/tests/test_crypto.py::test_dh_shared_key", "sympy/crypto/tests/test_crypto.py::test_padded_key", 
"sympy/crypto/tests/test_crypto.py::test_bifid", "sympy/utilities/tests/test_misc.py::test_translate", "sympy/utilities/tests/test_misc.py::test_replace" ]
[]
[]
[]
BSD
371
sympy__sympy-10378
6d280c95fb9ab67fd7f395895201781eedbe1150
2016-01-10 10:56:04
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/simplify/powsimp.py b/sympy/simplify/powsimp.py index c7a84ae522..66c911d211 100644 --- a/sympy/simplify/powsimp.py +++ b/sympy/simplify/powsimp.py @@ -242,6 +242,8 @@ def bkey(b, e=None): else: c, m = e.as_coeff_Mul(rational=True) if c is not S.One: + if m.is_integer: + return (b, Integer(c.q)), m*Integer(c.p) return (b**m, Integer(c.q)), Integer(c.p) else: return (b**e, S.One), S.One
Simplification bug on alternating series. Hi! ```python from sympy import * n = Symbol('n', odd=true, nonzero=true) x = (-1)**(n/2 - Rational(1,2)) - (-1)**(3*n/2 - Rational(1,2)) print( x.subs(n,1)) # 2 print( x.subs(n,3)) # -2 print( x.subs(n,5)) # 2 print(simplify(x)) # -> 2 bug ``` I think this should be considered a bug.
sympy/sympy
diff --git a/sympy/simplify/tests/test_powsimp.py b/sympy/simplify/tests/test_powsimp.py index 9d552fee49..8e4011c44b 100644 --- a/sympy/simplify/tests/test_powsimp.py +++ b/sympy/simplify/tests/test_powsimp.py @@ -1,6 +1,7 @@ from sympy import ( symbols, powsimp, symbols, MatrixSymbol, sqrt, pi, Mul, gamma, Function, - S, I, exp, simplify, sin, E, log, hyper, Symbol, Dummy, powdenest, root) + S, I, exp, simplify, sin, E, log, hyper, Symbol, Dummy, powdenest, root, + Rational) from sympy.abc import x, y, z, t, a, b, c, d, e, f, g, h, i, k @@ -281,3 +282,16 @@ def test_issue_from_PR1599(): assert (powsimp(root(n1, 3)*root(n2, 3)*root(n3, 3)*root(n4, 3)) == -(-1)**(S(1)/3)* (-n1)**(S(1)/3)*(-n2)**(S(1)/3)*(-n3)**(S(1)/3)*(-n4)**(S(1)/3)) + + +def test_issue_10195(): + a = Symbol('a', integer=True) + l = Symbol('l', even=True, nonzero=True) + n = Symbol('n', odd=True) + e_x = (-1)**(n/2 - Rational(1, 2)) - (-1)**(3*n/2 - Rational(1, 2)) + assert powsimp((-1)**(l/2)) == I**l + assert powsimp((-1)**(n/2)) == I**n + assert powsimp((-1)**(3*n/2)) == -I**n + assert powsimp(e_x) == (-1)**(n/2 - Rational(1, 2)) + (-1)**(3*n/2 + + Rational(1,2)) + assert powsimp((-1)**(3*a/2)) == (-I)**a
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@6d280c95fb9ab67fd7f395895201781eedbe1150#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/simplify/tests/test_powsimp.py::test_issue_10195" ]
[]
[ "sympy/simplify/tests/test_powsimp.py::test_powsimp", "sympy/simplify/tests/test_powsimp.py::test_powsimp_negated_base", "sympy/simplify/tests/test_powsimp.py::test_powsimp_nc", "sympy/simplify/tests/test_powsimp.py::test_issue_6440", "sympy/simplify/tests/test_powsimp.py::test_powdenest", "sympy/simplify/tests/test_powsimp.py::test_powdenest_polar", "sympy/simplify/tests/test_powsimp.py::test_issue_5805", "sympy/simplify/tests/test_powsimp.py::test_issue_9324_powsimp_on_matrix_symbol", "sympy/simplify/tests/test_powsimp.py::test_issue_6367", "sympy/simplify/tests/test_powsimp.py::test_powsimp_polar", "sympy/simplify/tests/test_powsimp.py::test_issue_5728", "sympy/simplify/tests/test_powsimp.py::test_issue_from_PR1599" ]
[]
BSD
372
m-lab__bigsanity-17
7bb4df0ddb204026af693e91f33e13a039f66a3d
2016-01-11 18:53:45
7bb4df0ddb204026af693e91f33e13a039f66a3d
diff --git a/bigsanity/query_construct.py b/bigsanity/query_construct.py index 58e1b2d..cead3cd 100644 --- a/bigsanity/query_construct.py +++ b/bigsanity/query_construct.py @@ -102,6 +102,26 @@ def _project_has_intermediate_snapshots(project): project == constants.PROJECT_ID_NPAD) +def _project_to_time_field(project): + """Returns the appropriate test log time field for the project type. + + Returns the appropriate test log time field for a test given its project + type. All web100 M-Lab tests use 'web100_log_entry.log_time', while Paris + Traceroute uses 'log_time'. + + Args: + project: The numeric ID of the project (e.g. NDT = 0). + + Returns: + The string name of the log time field for the given project in the + BigQuery dataset schema. + """ + if project == constants.PROJECT_ID_PARIS_TRACEROUTE: + return 'log_time' + else: + return 'web100_log_entry.log_time' + + class TableEquivalenceQueryGenerator(object): """Generates queries to test the equivalence of two M-Lab tables.""" @@ -153,17 +173,18 @@ class TableEquivalenceQueryGenerator(object): return _construct_test_id_subquery(tables, conditions) def _format_time_range_condition(self): + time_field = _project_to_time_field(self._project) start_time = _to_unix_timestamp(self._time_range_start) start_time_human = _to_human_readable_date(self._time_range_start) end_time = _to_unix_timestamp(self._time_range_end) end_time_human = _to_human_readable_date(self._time_range_end) - return ( - '((web100_log_entry.log_time >= {start_time}) AND -- {start_time_human}' - '\n (web100_log_entry.log_time < {end_time})) -- {end_time_human}' - ).format(start_time=start_time, - start_time_human=start_time_human, - end_time=end_time, - end_time_human=end_time_human) + return ('(({time_field} >= {start_time}) AND -- {start_time_human}' + '\n ({time_field} < {end_time})) -- {end_time_human}' + ).format(time_field=time_field, + start_time=start_time, + start_time_human=start_time_human, + end_time=end_time, + 
end_time_human=end_time_human) class TableEquivalenceQueryGeneratorFactory(object):
paris_traceroute uses log_time paris_traceroute data uses `log_time` rather than `web100_log_entry.log_time` since Paris Traceroute is not web100 based. We need to adjust the query construction to address this.
m-lab/bigsanity
diff --git a/tests/test_query_construct.py b/tests/test_query_construct.py index 22441d3..a64f523 100644 --- a/tests/test_query_construct.py +++ b/tests/test_query_construct.py @@ -284,8 +284,8 @@ class TableEquivalenceQueryGeneratorTest(unittest.TestCase): plx.google:m_lab.2015_01.all WHERE project = 3 - AND ((web100_log_entry.log_time >= 1419724800) AND -- 2014-12-28 - (web100_log_entry.log_time < 1420243200)) -- 2015-01-03 + AND ((log_time >= 1419724800) AND -- 2014-12-28 + (log_time < 1420243200)) -- 2015-01-03 ) AS per_month FULL OUTER JOIN EACH ( @@ -294,8 +294,8 @@ class TableEquivalenceQueryGeneratorTest(unittest.TestCase): FROM plx.google:m_lab.paris_traceroute.all WHERE - ((web100_log_entry.log_time >= 1419724800) AND -- 2014-12-28 - (web100_log_entry.log_time < 1420243200)) -- 2015-01-03 + ((log_time >= 1419724800) AND -- 2014-12-28 + (log_time < 1420243200)) -- 2015-01-03 ) AS per_project ON per_month.test_id=per_project.test_id
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 2 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -r requirements.txt && pip install -r test-requirements.txt", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt", "test-requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup==1.2.2 iniconfig==2.1.0 mock==5.2.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 pytest-cov==6.0.0 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.2.1
name: bigsanity channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - exceptiongroup==1.2.2 - iniconfig==2.1.0 - mock==5.2.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - pytest-cov==6.0.0 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/bigsanity
[ "tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_paris_traceroute" ]
[]
[ "tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_ndt_across_months", "tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_ndt_full_month", "tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_ndt_within_single_month", "tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_npad", "tests/test_query_construct.py::TableEquivalenceQueryGeneratorTest::test_correct_query_generation_for_sidestream" ]
[]
Apache License 2.0
373
scrapy__scrapy-1671
f01fd076420f0e58a1a165be31ec505eeb561ef4
2016-01-12 09:51:05
6aa85aee2a274393307ac3e777180fcbdbdc9848
diff --git a/scrapy/utils/iterators.py b/scrapy/utils/iterators.py index ce59c9719..b0688791e 100644 --- a/scrapy/utils/iterators.py +++ b/scrapy/utils/iterators.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__) def xmliter(obj, nodename): """Return a iterator of Selector's over all nodes of a XML document, - given tha name of the node to iterate. Useful for parsing XML feeds. + given the name of the node to iterate. Useful for parsing XML feeds. obj can be: - a Response object @@ -35,7 +35,7 @@ def xmliter(obj, nodename): header_end = re_rsearch(HEADER_END_RE, text) header_end = text[header_end[1]:].strip() if header_end else '' - r = re.compile(r"<{0}[\s>].*?</{0}>".format(nodename_patt), re.DOTALL) + r = re.compile(r'<%(np)s[\s>].*?</%(np)s>' % {'np': nodename_patt}, re.DOTALL) for match in r.finditer(text): nodetext = header_start + match.group() + header_end yield Selector(text=nodetext, type='xml').xpath('//' + nodename)[0] @@ -48,7 +48,7 @@ def xmliter_lxml(obj, nodename, namespace=None, prefix='x'): iterable = etree.iterparse(reader, tag=tag, encoding=reader.encoding) selxpath = '//' + ('%s:%s' % (prefix, nodename) if namespace else nodename) for _, node in iterable: - nodetext = etree.tostring(node) + nodetext = etree.tostring(node, encoding='unicode') node.clear() xs = Selector(text=nodetext, type='xml') if namespace: @@ -128,8 +128,11 @@ def csviter(obj, delimiter=None, headers=None, encoding=None, quotechar=None): def _body_or_str(obj, unicode=True): - assert isinstance(obj, (Response, six.string_types, bytes)), \ - "obj must be Response or basestring, not %s" % type(obj).__name__ + expected_types = (Response, six.text_type, six.binary_type) + assert isinstance(obj, expected_types), \ + "obj must be %s, not %s" % ( + " or ".join(t.__name__ for t in expected_types), + type(obj).__name__) if isinstance(obj, Response): if not unicode: return obj.body
XMLFeedSpider encoding issue Scrapy version: 1.04 My spider (from the XMLFeedSpider example in the docs) doesn´t seem to read the defined itertag since it contains the iso-8859-1 letter "Þ" http://www.w3schools.com/charsets/ref_html_8859.asp I´ve tried my code on a different url with english xml tags and it works fine. ```python # -*- coding: utf-8 -*- import scrapy from scrapy.spiders import XMLFeedSpider from althingi_scraper.items import PartyItem class PartySpider(XMLFeedSpider): name = 'party' allowed_domains = ['http://www.althingi.is'] #session = '145' start_urls = [ #'http://www.althingi.is/altext/xml/thingflokkar/?lthing=%s' % session, 'http://www.althingi.is/altext/xml/thingflokkar/' ] itertag = 'þingflokkar' def parse_node(self, response, node): item = PartyItem() item['party_id'] = node.xpath('@id').extract() item['name'] = node.xpath('heiti').extract() #item['short_abbr'] = node.xpath('stuttskammstöfun').extract() #item['long_abbr'] = node.xpath('löngskammstöfun').extract() return item ``` Any thoughts?
scrapy/scrapy
diff --git a/tests/test_utils_iterators.py b/tests/test_utils_iterators.py index d42ed2c91..b2e3889a4 100644 --- a/tests/test_utils_iterators.py +++ b/tests/test_utils_iterators.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import os import six from twisted.trial import unittest @@ -46,6 +47,60 @@ class XmliterTestCase(unittest.TestCase): for e in self.xmliter(response, 'matchme...')] self.assertEqual(nodenames, [['matchme...']]) + def test_xmliter_unicode(self): + # example taken from https://github.com/scrapy/scrapy/issues/1665 + body = u"""<?xml version="1.0" encoding="UTF-8"?> + <þingflokkar> + <þingflokkur id="26"> + <heiti /> + <skammstafanir> + <stuttskammstöfun>-</stuttskammstöfun> + <löngskammstöfun /> + </skammstafanir> + <tímabil> + <fyrstaþing>80</fyrstaþing> + </tímabil> + </þingflokkur> + <þingflokkur id="21"> + <heiti>Alþýðubandalag</heiti> + <skammstafanir> + <stuttskammstöfun>Ab</stuttskammstöfun> + <löngskammstöfun>Alþb.</löngskammstöfun> + </skammstafanir> + <tímabil> + <fyrstaþing>76</fyrstaþing> + <síðastaþing>123</síðastaþing> + </tímabil> + </þingflokkur> + <þingflokkur id="27"> + <heiti>Alþýðuflokkur</heiti> + <skammstafanir> + <stuttskammstöfun>A</stuttskammstöfun> + <löngskammstöfun>Alþfl.</löngskammstöfun> + </skammstafanir> + <tímabil> + <fyrstaþing>27</fyrstaþing> + <síðastaþing>120</síðastaþing> + </tímabil> + </þingflokkur> + </þingflokkar>""" + + for r in ( + # with bytes + XmlResponse(url="http://example.com", body=body.encode('utf-8')), + # Unicode body needs encoding information + XmlResponse(url="http://example.com", body=body, encoding='utf-8')): + + attrs = [] + for x in self.xmliter(r, u'þingflokkur'): + attrs.append((x.xpath('@id').extract(), + x.xpath(u'./skammstafanir/stuttskammstöfun/text()').extract(), + x.xpath(u'./tímabil/fyrstaþing/text()').extract())) + + self.assertEqual(attrs, + [([u'26'], [u'-'], [u'80']), + ([u'21'], [u'Ab'], [u'76']), + ([u'27'], [u'A'], [u'27'])]) def test_xmliter_text(self): body = u"""<?xml 
version="1.0" encoding="UTF-8"?><products><product>one</product><product>two</product></products>""" @@ -96,6 +151,10 @@ class XmliterTestCase(unittest.TestCase): self.assertRaises(StopIteration, next, iter) + def test_xmliter_objtype_exception(self): + i = self.xmliter(42, 'product') + self.assertRaises(AssertionError, next, i) + def test_xmliter_encoding(self): body = b'<?xml version="1.0" encoding="ISO-8859-9"?>\n<xml>\n <item>Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6</item>\n</xml>\n\n' response = XmlResponse('http://www.example.com', body=body) @@ -169,6 +228,9 @@ class LxmlXmliterTestCase(XmliterTestCase): node = next(my_iter) self.assertEqual(node.xpath('f:name/text()').extract(), ['African Coffee Table']) + def test_xmliter_objtype_exception(self): + i = self.xmliter(42, 'product') + self.assertRaises(TypeError, next, i) class UtilsCsvTestCase(unittest.TestCase): sample_feeds_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sample_data', 'feeds')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 1 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 Automat==24.8.1 cffi==1.17.1 constantly==23.10.4 coverage==7.8.0 cryptography==44.0.2 cssselect==1.3.0 exceptiongroup==1.2.2 hyperlink==21.0.0 idna==3.10 incremental==24.7.2 iniconfig==2.1.0 jmespath==1.0.1 lxml==5.3.1 packaging==24.2 parsel==1.10.0 pluggy==1.5.0 pyasn1==0.6.1 pyasn1_modules==0.4.2 pycparser==2.22 PyDispatcher==2.0.7 pyOpenSSL==25.0.0 pytest==8.3.5 pytest-cov==6.0.0 queuelib==1.7.0 -e git+https://github.com/scrapy/scrapy.git@f01fd076420f0e58a1a165be31ec505eeb561ef4#egg=Scrapy service-identity==24.2.0 six==1.17.0 tomli==2.2.1 Twisted==24.11.0 typing_extensions==4.13.0 w3lib==2.3.1 zope.interface==7.2
name: scrapy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - automat==24.8.1 - cffi==1.17.1 - constantly==23.10.4 - coverage==7.8.0 - cryptography==44.0.2 - cssselect==1.3.0 - exceptiongroup==1.2.2 - hyperlink==21.0.0 - idna==3.10 - incremental==24.7.2 - iniconfig==2.1.0 - jmespath==1.0.1 - lxml==5.3.1 - packaging==24.2 - parsel==1.10.0 - pluggy==1.5.0 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pycparser==2.22 - pydispatcher==2.0.7 - pyopenssl==25.0.0 - pytest==8.3.5 - pytest-cov==6.0.0 - queuelib==1.7.0 - service-identity==24.2.0 - six==1.17.0 - tomli==2.2.1 - twisted==24.11.0 - typing-extensions==4.13.0 - w3lib==2.3.1 - zope-interface==7.2 prefix: /opt/conda/envs/scrapy
[ "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_unicode" ]
[ "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_defaults", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_delimiter", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_delimiter_binary_response_assume_utf8_encoding", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_encoding", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_exception", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_falserow", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_headers", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_quotechar", "tests/test_utils_iterators.py::UtilsCsvTestCase::test_csviter_wrong_quotechar" ]
[ "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_encoding", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_exception", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_namespaces", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_objtype_exception", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_text", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_unicode", "tests/test_utils_iterators.py::XmliterTestCase::test_xmliter_unusual_node", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_encoding", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_exception", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_iterate_namespace", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_namespaces", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_namespaces_prefix", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_objtype_exception", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_text", "tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_unusual_node", "tests/test_utils_iterators.py::TestHelper::test_body_or_str" ]
[]
BSD 3-Clause "New" or "Revised" License
374
ARMmbed__yotta-656
16cc2baeba653dc77e3ce32c20018b32ab108bf4
2016-01-12 15:29:25
16cc2baeba653dc77e3ce32c20018b32ab108bf4
diff --git a/yotta/lib/pack.py b/yotta/lib/pack.py index ade5aae..f7ef9d0 100644 --- a/yotta/lib/pack.py +++ b/yotta/lib/pack.py @@ -257,6 +257,12 @@ class Pack(object): else: return None + def getKeywords(self): + if self.description: + return self.description.get('keywords', []) + else: + return [] + def _parseIgnoreFile(self, f): r = [] for l in f: diff --git a/yotta/options/__init__.py b/yotta/options/__init__.py index 1482b43..aa66e80 100644 --- a/yotta/options/__init__.py +++ b/yotta/options/__init__.py @@ -11,6 +11,7 @@ from . import noninteractive from . import registry from . import target from . import config +from . import force # this modifies argparse when it's imported: from . import parser diff --git a/yotta/options/force.py b/yotta/options/force.py new file mode 100644 index 0000000..33c8e6e --- /dev/null +++ b/yotta/options/force.py @@ -0,0 +1,12 @@ +# Copyright 2014-2015 ARM Limited +# +# Licensed under the Apache License, Version 2.0 +# See LICENSE file for details. + +def addTo(parser): + parser.add_argument('-f', '--force', action='store_true', dest="force", + help='Force the operation to (try to) continue even in situations which '+ + 'would be an error.' + ) + + diff --git a/yotta/publish.py b/yotta/publish.py index 28d999b..721d932 100644 --- a/yotta/publish.py +++ b/yotta/publish.py @@ -8,11 +8,50 @@ import logging # validate, , validate things, internal from .lib import validate +# options, , shared options, internal +import yotta.options as options def addOptions(parser): - # no options + options.force.addTo(parser) + +# python 2 + 3 compatibility +try: + global input + input = raw_input +except NameError: pass +def prePublishCheck(p, force=False, interactive=True): + need_ok = False + if p.description.get('bin', None) is not None: + logging.warning( + 'This is an executable application, not a re-usable library module. Other modules will not be able to depend on it!' 
+ ) + need_ok = True + + official_keywords = [x for x in p.getKeywords() if x.endswith('-official')] + if len(official_keywords): + need_ok = True + for k in official_keywords: + prefix = k[:-len('-official')] + logging.warning( + ('You\'re publishing with the %s tag. Is this really an '+ + 'officially supported %s module? If not, please remove the %s '+ + 'tag from your %s file. If you are unsure, please ask on the '+ + 'issue tracker.') % ( + k, prefix, k, p.description_filename + ) + ) + + if need_ok and not interactive: + logging.error('--noninteractive prevents user confirmation. Please re-run with --force') + return 1 + + if need_ok and not force: + input("If you still want to publish, press [enter] to continue.") + + return 0 + def execCommand(args, following_args): p = validate.currentDirectoryModuleOrTarget() if not p: @@ -22,17 +61,9 @@ def execCommand(args, following_args): logging.error('The working directory is not clean. Commit before publishing!') return 1 - if p.description.get('bin', None) is not None: - logging.warning( - 'This is an executable application, not a re-usable library module. Other modules will not be able to depend on it!' - ) - # python 2 + 3 compatibility - try: - global input - input = raw_input - except NameError: - pass - raw_input("If you still want to publish it, press [enter] to continue.") + errcode = prePublishCheck(p, args.force, args.interactive) + if errcode and not args.force: + return errcode error = p.publish(args.registry) if error:
Discourage accidental use of mbed-official keyword There have been several cases where target descriptions have been published with descriptions and keywords indicating that they are officially supported, when in fact they aren't. yotta should warn when these keywords are used, to make sure that their use is intentional.
ARMmbed/yotta
diff --git a/yotta/test/cli/test_publish.py b/yotta/test/cli/test_publish.py index de93d77..11b713e 100644 --- a/yotta/test/cli/test_publish.py +++ b/yotta/test/cli/test_publish.py @@ -13,7 +13,7 @@ import tempfile # internal modules: from yotta.lib.fsutils import rmRf from . import cli - +from . import util Test_Target = "x86-osx-native,*" @@ -54,6 +54,26 @@ Public_Module_JSON = '''{ }''' +Test_Publish = { +'module.json':'''{ + "name": "test-publish", + "version": "0.0.0", + "description": "Test yotta publish", + "author": "James Crosby <[email protected]>", + "license": "Apache-2.0", + "keywords": ["mbed-official"], + "dependencies":{ + } +}''', +'readme.md':'''##This is a test module used in yotta's test suite.''', +'source/foo.c':'''#include "stdio.h" +int foo(){ + printf("foo!\\n"); + return 7; +}''' +} + + class TestCLIPublish(unittest.TestCase): @classmethod def setUpClass(cls): @@ -89,6 +109,15 @@ class TestCLIPublish(unittest.TestCase): else: del os.environ['YOTTA_USER_SETTINGS_DIR'] + def test_warnOfficialKeywords(self): + path = util.writeTestFiles(Test_Publish, True) + + stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', '--noninteractive', 'publish'], cwd=path) + self.assertNotEqual(statuscode, 0) + self.assertIn('Is this really an officially supported mbed module', stdout + stderr) + + util.rmRf(path) + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
0.12
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc cmake ninja-build" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
argcomplete==1.0.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 colorama==0.3.9 cryptography==44.0.2 Deprecated==1.2.18 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work future==1.0.0 hgapi==1.7.4 idna==3.10 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work intelhex==2.3.0 intervaltree==3.1.0 Jinja2==2.11.3 jsonpointer==2.0 jsonschema==2.6.0 MarkupSafe==3.0.2 mbed_test_wrapper==0.0.3 packaging @ file:///croot/packaging_1734472117206/work pathlib==1.0.1 pluggy @ file:///croot/pluggy_1733169602837/work project-generator-definitions==0.2.46 project_generator==0.8.17 pycparser==2.22 pyelftools==0.23 PyGithub==1.54.1 PyJWT==1.7.1 pyocd==0.15.0 pytest @ file:///croot/pytest_1738938843180/work pyusb==1.3.1 PyYAML==3.13 requests==2.32.3 semantic-version==2.10.0 six==1.17.0 sortedcontainers==2.4.0 tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work urllib3==2.3.0 valinor==0.0.15 websocket-client==1.8.0 wrapt==1.17.2 xmltodict==0.14.2 -e git+https://github.com/ARMmbed/yotta.git@16cc2baeba653dc77e3ce32c20018b32ab108bf4#egg=yotta
name: yotta channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - argcomplete==1.0.0 - argparse==1.4.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - colorama==0.3.9 - cryptography==44.0.2 - deprecated==1.2.18 - future==1.0.0 - hgapi==1.7.4 - idna==3.10 - intelhex==2.3.0 - intervaltree==3.1.0 - jinja2==2.11.3 - jsonpointer==2.0 - jsonschema==2.6.0 - markupsafe==3.0.2 - mbed-test-wrapper==0.0.3 - pathlib==1.0.1 - project-generator==0.8.17 - project-generator-definitions==0.2.46 - pycparser==2.22 - pyelftools==0.23 - pygithub==1.54.1 - pyjwt==1.7.1 - pyocd==0.15.0 - pyusb==1.3.1 - pyyaml==3.13 - requests==2.32.3 - semantic-version==2.10.0 - six==1.17.0 - sortedcontainers==2.4.0 - urllib3==2.3.0 - valinor==0.0.15 - websocket-client==1.8.0 - wrapt==1.17.2 - xmltodict==0.14.2 prefix: /opt/conda/envs/yotta
[ "yotta/test/cli/test_publish.py::TestCLIPublish::test_warnOfficialKeywords" ]
[ "yotta/test/cli/test_publish.py::TestCLIPublish::test_publishNotAuthed" ]
[ "yotta/test/cli/test_publish.py::TestCLIPublish::test_publishPrivate" ]
[]
Apache License 2.0
375
Pylons__webob-230
9400c049d05c8ba350daf119aa16ded24ece31f6
2016-01-12 17:50:01
9400c049d05c8ba350daf119aa16ded24ece31f6
diff --git a/webob/exc.py b/webob/exc.py index 57a81b5..044c00a 100644 --- a/webob/exc.py +++ b/webob/exc.py @@ -165,10 +165,12 @@ References: """ +import json from string import Template import re import sys +from webob.acceptparse import Accept from webob.compat import ( class_types, text_, @@ -250,7 +252,7 @@ ${body}''') empty_body = False def __init__(self, detail=None, headers=None, comment=None, - body_template=None, **kw): + body_template=None, json_formatter=None, **kw): Response.__init__(self, status='%s %s' % (self.code, self.title), **kw) @@ -265,6 +267,8 @@ ${body}''') if self.empty_body: del self.content_type del self.content_length + if json_formatter is not None: + self.json_formatter = json_formatter def __str__(self): return self.detail or self.explanation @@ -300,14 +304,31 @@ ${body}''') return self.html_template_obj.substitute(status=self.status, body=body) + def json_formatter(self, body, status, title, environ): + return {'message': body, + 'code': status, + 'title': title} + + def json_body(self, environ): + body = self._make_body(environ, no_escape) + jsonbody = self.json_formatter(body=body, status=self.status, + title=self.title, environ=environ) + return json.dumps(jsonbody) + def generate_response(self, environ, start_response): if self.content_length is not None: del self.content_length headerlist = list(self.headerlist) - accept = environ.get('HTTP_ACCEPT', '') - if accept and 'html' in accept or '*/*' in accept: + accept_value = environ.get('HTTP_ACCEPT', '') + accept = Accept(accept_value) + match = accept.best_match(['application/json', 'text/html', + 'text/plain'], default_match='text/plain') + if match == 'text/html': content_type = 'text/html' body = self.html_body(environ) + elif match == 'application/json': + content_type = 'application/json' + body = self.json_body(environ) else: content_type = 'text/plain' body = self.plain_body(environ)
Allow for JSON Exception Bodies I'm currently working on several projects that provide a JSON API using WebOb. Currently, however, whenever we use a `webob.exc` exception to return an error to the user (e.g., `webob.exc.HTTPBadRequest`) the body of that message is always in a content-type other than what they're expecting (HTML if they don't specify an Accept header, plain-text otherwise). There doesn't seem to be a pleasant, convenient, or simple way to make it use JSON beyond something like (the untested) following code: ```py import string import webob.exc class WSGIHTTPException(webob.exc.WSGIHTTPException): body_template_obj = string.Template('{"code", ${status}, "message": "${body}", "title": "${title}"}' plain_template_obj = string.Template('{"error": ${body}}') class HTTPBadRequest(webob.exc.HTTPBadRequest, WSGIHTTPException): pass class HTTPUnauthored(webob.exc.HTTPBadRequest, WSGIHTTPException): pass # etc. ``` This is particularly problematic because we have to redefine all of the exceptions we want to use to doubly inherit from our new sub-classed `WSGIHTTPException` and the original. It also doesn't handle the fact that we have to basically copy and paste [generate_response][] into our subclass so that we set the appropriate content-type header. Is it too much to ask to either: A) Add support for JSON response bodies in `WSGIHTTPException`s, or B) Make `WSGIHTTPException` slightly more modular so we can only override parts we need? [generate_response]: https://github.com/Pylons/webob/blob/7f98f694e7c1a569f53fb4085d084430ee8b2cc2/webob/exc.py#L302..L323 Thanks in advance,
Pylons/webob
diff --git a/tests/test_exc.py b/tests/test_exc.py index dcb1fed..8204783 100644 --- a/tests/test_exc.py +++ b/tests/test_exc.py @@ -1,3 +1,5 @@ +import json + from webob.request import Request from webob.dec import wsgify from webob import exc as webob_exc @@ -119,6 +121,57 @@ def test_WSGIHTTPException_html_body_w_comment(): '</html>' ) +def test_WSGIHTTPException_json_body_no_comment(): + class ValidationError(webob_exc.WSGIHTTPException): + code = '422' + title = 'Validation Failed' + explanation = 'Validation of an attribute failed.' + + exc = ValidationError(detail='Attribute "xyz" is invalid.') + body = exc.json_body({}) + eq_(json.loads(body), { + "code": "422 Validation Failed", + "title": "Validation Failed", + "message": "Validation of an attribute failed.<br /><br />\nAttribute" + ' "xyz" is invalid.\n\n', + }) + +def test_WSGIHTTPException_respects_application_json(): + class ValidationError(webob_exc.WSGIHTTPException): + code = '422' + title = 'Validation Failed' + explanation = 'Validation of an attribute failed.' + def start_response(status, headers, exc_info=None): + pass + + exc = ValidationError(detail='Attribute "xyz" is invalid.') + resp = exc.generate_response(environ={ + 'wsgi.url_scheme': 'HTTP', + 'SERVER_NAME': 'localhost', + 'SERVER_PORT': '80', + 'REQUEST_METHOD': 'PUT', + 'HTTP_ACCEPT': 'application/json', + }, start_response=start_response) + eq_(json.loads(resp[0].decode('utf-8')), { + "code": "422 Validation Failed", + "title": "Validation Failed", + "message": "Validation of an attribute failed.<br /><br />\nAttribute" + ' "xyz" is invalid.\n\n', + }) + +def test_WSGIHTTPException_allows_custom_json_formatter(): + def json_formatter(body, status, title, environ): + return {"fake": True} + class ValidationError(webob_exc.WSGIHTTPException): + code = '422' + title = 'Validation Failed' + explanation = 'Validation of an attribute failed.' 
+ + exc = ValidationError(detail='Attribute "xyz" is invalid.', + json_formatter=json_formatter) + body = exc.json_body({}) + eq_(json.loads(body), {"fake": True}) + def test_WSGIHTTPException_generate_response(): def start_response(status, headers, exc_info=None): pass
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
1.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[testing]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
coverage==7.8.0 exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work nose==1.3.7 packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work -e git+https://github.com/Pylons/webob.git@9400c049d05c8ba350daf119aa16ded24ece31f6#egg=WebOb
name: webob channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.8.0 - nose==1.3.7 prefix: /opt/conda/envs/webob
[ "tests/test_exc.py::test_WSGIHTTPException_json_body_no_comment", "tests/test_exc.py::test_WSGIHTTPException_respects_application_json", "tests/test_exc.py::test_WSGIHTTPException_allows_custom_json_formatter" ]
[]
[ "tests/test_exc.py::test_noescape_null", "tests/test_exc.py::test_noescape_not_basestring", "tests/test_exc.py::test_noescape_unicode", "tests/test_exc.py::test_strip_tags_empty", "tests/test_exc.py::test_strip_tags_newline_to_space", "tests/test_exc.py::test_strip_tags_zaps_carriage_return", "tests/test_exc.py::test_strip_tags_br_to_newline", "tests/test_exc.py::test_strip_tags_zaps_comments", "tests/test_exc.py::test_strip_tags_zaps_tags", "tests/test_exc.py::test_HTTPException", "tests/test_exc.py::test_exception_with_unicode_data", "tests/test_exc.py::test_WSGIHTTPException_headers", "tests/test_exc.py::test_WSGIHTTPException_w_body_template", "tests/test_exc.py::test_WSGIHTTPException_w_empty_body", "tests/test_exc.py::test_WSGIHTTPException___str__", "tests/test_exc.py::test_WSGIHTTPException_plain_body_no_comment", "tests/test_exc.py::test_WSGIHTTPException_html_body_w_comment", "tests/test_exc.py::test_WSGIHTTPException_generate_response", "tests/test_exc.py::test_WSGIHTTPException_call_w_body", "tests/test_exc.py::test_WSGIHTTPException_wsgi_response", "tests/test_exc.py::test_WSGIHTTPException_exception_newstyle", "tests/test_exc.py::test_WSGIHTTPException_exception_no_newstyle", "tests/test_exc.py::test_HTTPOk_head_of_proxied_head", "tests/test_exc.py::test_HTTPMove", "tests/test_exc.py::test_HTTPMove_location_not_none", "tests/test_exc.py::test_HTTPMove_location_newlines", "tests/test_exc.py::test_HTTPMove_add_slash_and_location", "tests/test_exc.py::test_HTTPMove_call_add_slash", "tests/test_exc.py::test_HTTPMove_call_query_string", "tests/test_exc.py::test_HTTPExceptionMiddleware_ok", "tests/test_exc.py::test_HTTPExceptionMiddleware_exception", "tests/test_exc.py::test_HTTPExceptionMiddleware_exception_exc_info_none", "tests/test_exc.py::test_status_map_is_deterministic" ]
[]
null
376
ross__requests-futures-28
cd3acaf2731eb7e48feb927d80615977a3ba8ede
2016-01-12 22:40:53
cd3acaf2731eb7e48feb927d80615977a3ba8ede
diff --git a/README.rst b/README.rst index 5a25b22..2cc5149 100644 --- a/README.rst +++ b/README.rst @@ -68,6 +68,15 @@ As a shortcut in case of just increasing workers number you can pass from requests_futures.sessions import FuturesSession session = FuturesSession(max_workers=10) +FutureSession will use an existing session object if supplied: + +.. code-block:: python + + from requests import session + from requests_futures.sessions import FuturesSession + my_session = session() + future_session = FuturesSession(session=my_session) + That's it. The api of requests.Session is preserved without any modifications beyond returning a Future rather than Response. As with all futures exceptions are shifted (thrown) to the future.result() call so try/except blocks should be diff --git a/requests_futures/sessions.py b/requests_futures/sessions.py index 3c52de7..905196f 100644 --- a/requests_futures/sessions.py +++ b/requests_futures/sessions.py @@ -27,7 +27,8 @@ from requests.adapters import DEFAULT_POOLSIZE, HTTPAdapter class FuturesSession(Session): - def __init__(self, executor=None, max_workers=2, *args, **kwargs): + def __init__(self, executor=None, max_workers=2, session=None, *args, + **kwargs): """Creates a FuturesSession Notes @@ -50,6 +51,7 @@ class FuturesSession(Session): self.mount('http://', HTTPAdapter(**adapter_kwargs)) self.executor = executor + self.session = session def request(self, *args, **kwargs): """Maintains the existing api for Session.request. @@ -60,7 +62,10 @@ class FuturesSession(Session): response in the background, e.g. call resp.json() so that json parsing happens in the background thread. """ - func = sup = super(FuturesSession, self).request + if self.session: + func = sup = self.session.request + else: + func = sup = super(FuturesSession, self).request background_callback = kwargs.pop('background_callback', None) if background_callback:
Allow FuturesSession to take a session object to defer to I'd like to use requests-futures with my own session object (in my case an instance of requests-oauthlib's `OAuth2Session`). The way grequests handles this is to add an __init__ kwarg which will take a session object and, if provided, defer requests calls to it. It seems like this should be possible to do with `FuturesSession` as well.
ross/requests-futures
diff --git a/test_requests_futures.py b/test_requests_futures.py index f9f6844..876ef6f 100644 --- a/test_requests_futures.py +++ b/test_requests_futures.py @@ -4,7 +4,7 @@ """Tests for Requests.""" from concurrent.futures import Future -from requests import Response +from requests import Response, session from os import environ from requests_futures.sessions import FuturesSession from unittest import TestCase, main @@ -54,6 +54,18 @@ class RequestsTestCase(TestCase): resp = future.result() self.assertEqual('boom', cm.exception.args[0]) + def test_supplied_session(self): + """ Tests the `session` keyword argument. """ + requests_session = session() + requests_session.headers['Foo'] = 'bar' + sess = FuturesSession(session=requests_session) + future = sess.get(httpbin('headers')) + self.assertIsInstance(future, Future) + resp = future.result() + self.assertIsInstance(resp, Response) + self.assertEqual(200, resp.status_code) + self.assertEqual(resp.json()['headers']['Foo'], 'bar') + def test_max_workers(self): """ Tests the `max_workers` shortcut. """ from concurrent.futures import ThreadPoolExecutor
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.5", "reqs_path": [ "requirements-python-3.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 -e git+https://github.com/ross/requests-futures.git@cd3acaf2731eb7e48feb927d80615977a3ba8ede#egg=requests_futures tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 zipp==3.6.0
name: requests-futures channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/requests-futures
[ "test_requests_futures.py::RequestsTestCase::test_supplied_session" ]
[]
[ "test_requests_futures.py::RequestsTestCase::test_context", "test_requests_futures.py::RequestsTestCase::test_futures_session", "test_requests_futures.py::RequestsTestCase::test_max_workers", "test_requests_futures.py::RequestsTestCase::test_redirect" ]
[]
Apache License 2.0
377
jupyter-incubator__sparkmagic-115
e29c15bf11fbada311796c36e1f5c9d7091b2667
2016-01-13 04:06:38
e29c15bf11fbada311796c36e1f5c9d7091b2667
diff --git a/remotespark/datawidgets/autovizwidget.py b/remotespark/datawidgets/autovizwidget.py index f0b6405..f166c2e 100644 --- a/remotespark/datawidgets/autovizwidget.py +++ b/remotespark/datawidgets/autovizwidget.py @@ -3,20 +3,14 @@ import pandas as pd from ipywidgets import FlexBox -from IPython.display import display +from remotespark.utils.ipythondisplay import IpythonDisplay from .encoding import Encoding from .encodingwidget import EncodingWidget from .ipywidgetfactory import IpyWidgetFactory from .plotlygraphs.graphrenderer import GraphRenderer -class IpythonDisplay(object): - @staticmethod - def display_to_ipython(to_display): - display(to_display) - - class AutoVizWidget(FlexBox): def __init__(self, df, encoding, renderer=None, ipywidget_factory=None, encoding_widget=None, ipython_display=None, nested_widget_mode=False, testing=False, **kwargs): @@ -74,14 +68,14 @@ class AutoVizWidget(FlexBox): # self.controls.children self.to_display.clear_output() - self.renderer.render(self.df, self.encoding, self.to_display) - self.encoding_widget.show_x(self.renderer.display_x(self.encoding.chart_type)) self.encoding_widget.show_y(self.renderer.display_y(self.encoding.chart_type)) self.encoding_widget.show_controls(self.renderer.display_controls(self.encoding.chart_type)) self.encoding_widget.show_logarithmic_x_axis(self.renderer.display_logarithmic_x_axis(self.encoding.chart_type)) self.encoding_widget.show_logarithmic_y_axis(self.renderer.display_logarithmic_y_axis(self.encoding.chart_type)) + self.renderer.render(self.df, self.encoding, self.to_display) + def _create_controls_widget(self): # Create types of viz hbox viz_types_widget = self._create_viz_types_buttons() diff --git a/remotespark/datawidgets/plotlygraphs/datagraph.py b/remotespark/datawidgets/plotlygraphs/datagraph.py index 130791b..a38a89a 100644 --- a/remotespark/datawidgets/plotlygraphs/datagraph.py +++ b/remotespark/datawidgets/plotlygraphs/datagraph.py @@ -1,17 +1,39 @@ # Copyright (c) 2015 
[email protected] # Distributed under the terms of the Modified BSD License. -from plotly.tools import FigureFactory as FigFac -from plotly.offline import iplot +import pandas as pd +from remotespark.utils.ipythondisplay import IpythonDisplay -class DataGraph(object): - @staticmethod - def render(df, encoding, output): - table = FigFac.create_table(df) +class DataGraph(object): + """This does not use the table version of plotly because it freezes up the browser for >60 rows. Instead, we use + pandas df HTML representation.""" + def __init__(self, display=None): + if display is None: + self.display = IpythonDisplay() + else: + self.display = display + + def render(self, df, encoding, output): with output: - iplot(table, show_link=False) + max_rows = pd.get_option("display.max_rows") + max_cols = pd.get_option("display.max_columns") + show_dimensions = pd.get_option("display.show_dimensions") + + # This will hide the index column for pandas df. + self.display.html_to_ipython(""" +<style> + table.dataframe.hideme thead th:first-child { + display: none; + } + table.dataframe.hideme tbody th { + display: none; + } +</style> +""") + self.display.html_to_ipython(df.to_html(max_rows=max_rows, max_cols=max_cols, + show_dimensions=show_dimensions, notebook=True, classes="hideme")) @staticmethod def display_logarithmic_x_axis(): diff --git a/remotespark/datawidgets/plotlygraphs/piegraph.py b/remotespark/datawidgets/plotlygraphs/piegraph.py index d8caf93..ecd881c 100644 --- a/remotespark/datawidgets/plotlygraphs/piegraph.py +++ b/remotespark/datawidgets/plotlygraphs/piegraph.py @@ -4,16 +4,29 @@ from plotly.graph_objs import Pie, Figure, Data from plotly.offline import iplot +import remotespark.utils.configuration as conf + class PieGraph(object): @staticmethod def render(df, encoding, output): - series = df.groupby([encoding.x]).size() - data = [Pie(values=series.values.tolist(), labels=series.index.tolist())] + values, labels = PieGraph._get_x_values_labels(df, encoding) + 
max_slices_pie_graph = conf.max_slices_pie_graph() with output: - fig = Figure(data=Data(data)) - iplot(fig, show_link=False) + # There's performance issues with a large amount of slices. + # 1500 rows crash the browser. + # 500 rows take ~15 s. + # 100 rows is almost automatic. + if len(values) > max_slices_pie_graph: + print("There's {} values in your pie graph, which would render the graph unresponsive.\n" + "Please select another X with at most {} possible values." + .format(len(values), max_slices_pie_graph)) + else: + data = [Pie(values=values, labels=labels)] + + fig = Figure(data=Data(data)) + iplot(fig, show_link=False) @staticmethod def display_logarithmic_x_axis(): @@ -32,5 +45,6 @@ class PieGraph(object): return False @staticmethod - def _get_x_values(df, encoding): - return df[encoding.x].tolist() + def _get_x_values_labels(df, encoding): + series = df.groupby([encoding.x]).size() + return series.values.tolist(), series.index.tolist() diff --git a/remotespark/default_config.json b/remotespark/default_config.json index 9f9a410..5a9d09b 100644 --- a/remotespark/default_config.json +++ b/remotespark/default_config.json @@ -55,5 +55,6 @@ }, "use_auto_viz": true, - "max_results_sql": 800 + "max_results_sql": 2500, + "max_slices_pie_graph": 100 } diff --git a/remotespark/utils/configuration.py b/remotespark/utils/configuration.py index 0d1a311..e9abc9e 100644 --- a/remotespark/utils/configuration.py +++ b/remotespark/utils/configuration.py @@ -176,9 +176,14 @@ def ignore_ssl_errors(): @_override def use_auto_viz(): - return False + return True @_override def max_results_sql(): - return 800 + return 2500 + + +@_override +def max_slices_pie_graph(): + return 100 diff --git a/remotespark/utils/ipythondisplay.py b/remotespark/utils/ipythondisplay.py new file mode 100644 index 0000000..0615aea --- /dev/null +++ b/remotespark/utils/ipythondisplay.py @@ -0,0 +1,11 @@ +from IPython.core.display import display, HTML + + +class IpythonDisplay(object): + @staticmethod 
+ def display_to_ipython(to_display): + display(to_display) + + @staticmethod + def html_to_ipython(to_display): + IpythonDisplay.display_to_ipython(HTML(to_display))
Incorrect visualizations on some sample data Ran the following code: hvac = sc.textFile('wasb:///HdiSamples/HdiSamples/SensorSampleData/hvac/HVAC.csv') from pyspark.sql import Row Doc = Row("TargetTemp", "ActualTemp", "System", "SystemAge", "BuildingID") def parseDocument(line): values = [str(x) for x in line.split(',')] return Doc(values[2], values[3], values[4], values[5], values[6]) documents = hvac.filter(lambda s: "Date" not in s).map(parseDocument) df = sqlContext.createDataFrame(documents) df.registerTempTable('data') and then %select * from data limit 100 The visualizations, at least for the pie graphs, are wrong. Screenshot: ![screen shot 2016-01-06 at 6 18 47 pm](https://cloud.githubusercontent.com/assets/13972471/12160578/3bdb966e-b4a2-11e5-97ce-ee297d0ca6cc.png) Clearly there is no building where the desired target temperature is 1.
jupyter-incubator/sparkmagic
diff --git a/tests/datawidgetstests/test_plotlygraphs.py b/tests/datawidgetstests/test_plotlygraphs.py index 6506b65..72b21b0 100644 --- a/tests/datawidgetstests/test_plotlygraphs.py +++ b/tests/datawidgetstests/test_plotlygraphs.py @@ -1,4 +1,5 @@ import pandas as pd +from mock import MagicMock from remotespark.datawidgets.plotlygraphs.graphbase import GraphBase from remotespark.datawidgets.plotlygraphs.piegraph import PieGraph @@ -65,6 +66,39 @@ def test_pie_graph_display_methods(): assert not PieGraph.display_logarithmic_y_axis() +def test_pie_graph_get_values_labels(): + records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12}, + {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0}, + {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11}, + {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5}, + {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19}, + {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32}] + df = pd.DataFrame(records) + encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum) + + values, labels = PieGraph._get_x_values_labels(df, encoding) + + assert values == [2, 1, 1, 1, 1] + assert labels == ["6/1/13", "6/1/14", "6/1/15", "6/1/16", "6/1/17"] + + +def test_data_graph_render(): + records = [{u'buildingID': 0, u'date': u'6/1/13', u'temp_diff': 12}, + {u'buildingID': 1, u'date': u'6/1/13', u'temp_diff': 0}, + {u'buildingID': 2, u'date': u'6/1/14', u'temp_diff': 11}, + {u'buildingID': 0, u'date': u'6/1/15', u'temp_diff': 5}, + {u'buildingID': 1, u'date': u'6/1/16', u'temp_diff': 19}, + {u'buildingID': 2, u'date': u'6/1/17', u'temp_diff': 32}] + df = pd.DataFrame(records) + encoding = Encoding(chart_type=Encoding.chart_type_line, x="date", y="temp_diff", y_aggregation=Encoding.y_agg_sum) + display = MagicMock() + + data = DataGraph(display) + data.render(df, encoding, MagicMock()) + + assert display.html_to_ipython.call_count == 2 + + def 
test_data_graph_display_methods(): assert not DataGraph.display_x() assert not DataGraph.display_y()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 5 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 narwhals==1.32.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==6.0.1 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@e29c15bf11fbada311796c36e1f5c9d7091b2667#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - narwhals==1.32.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==6.0.1 - pluggy==1.5.0 - 
prometheus-client==0.21.1 - ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/datawidgetstests/test_plotlygraphs.py::test_pie_graph_get_values_labels", "tests/datawidgetstests/test_plotlygraphs.py::test_data_graph_render" ]
[ "tests/datawidgetstests/test_plotlygraphs.py::test_graphbase_get_x_y_values" ]
[ "tests/datawidgetstests/test_plotlygraphs.py::test_graph_base_display_methods", "tests/datawidgetstests/test_plotlygraphs.py::test_pie_graph_display_methods", "tests/datawidgetstests/test_plotlygraphs.py::test_data_graph_display_methods" ]
[]
Modified BSD License
378
jupyter-incubator__sparkmagic-121
cb87d63ab3268c1c5bd63c9fc1f1e971d5a1fe31
2016-01-14 01:30:02
cb87d63ab3268c1c5bd63c9fc1f1e971d5a1fe31
diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py index 5559856..eab5eed 100644 --- a/remotespark/livyclientlib/livyclient.py +++ b/remotespark/livyclientlib/livyclient.py @@ -10,16 +10,15 @@ class LivyClient(object): def __init__(self, session): self.logger = Log("LivyClient") - - execute_timeout_seconds = conf.execute_timeout_seconds() - self._session = session - self._session.create_sql_context() - self._execute_timeout_seconds = execute_timeout_seconds + self._execute_timeout_seconds = conf.execute_timeout_seconds() def __str__(self): return str(self._session) + def start(self): + self._session.create_sql_context() + def serialize(self): return self._session.get_state().to_dict() diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py index 1254f48..fcc6ae6 100644 --- a/remotespark/livyclientlib/sparkcontroller.py +++ b/remotespark/livyclientlib/sparkcontroller.py @@ -72,6 +72,7 @@ class SparkController(object): session.start() livy_client = self.client_factory.build_client(session) self.client_manager.add_client(name, livy_client) + livy_client.start() def get_client_keys(self): return self.client_manager.get_sessions_list()
Shutdown pyspark kernel doesn't gaurantee that session has been deleted Repro Steps: - Open Pyspark kernel - Do any operation, 1+1 for example - shutdown it before getting the answer (while creating the SQL context & Hive one) - SSH the cluster, you'll find the session still existing For now, to delete it, you have to do that manually from ssh the cluster.
jupyter-incubator/sparkmagic
diff --git a/tests/test_livyclient.py b/tests/test_livyclient.py index 57870fa..1a2931a 100644 --- a/tests/test_livyclient.py +++ b/tests/test_livyclient.py @@ -6,16 +6,23 @@ from remotespark.utils.utils import get_connection_string from remotespark.utils.constants import Constants -def test_create_sql_context_automatically(): +def test_doesnt_create_sql_context_automatically(): mock_spark_session = MagicMock() LivyClient(mock_spark_session) + assert not mock_spark_session.create_sql_context.called + +def test_start_creates_sql_context(): + mock_spark_session = MagicMock() + client = LivyClient(mock_spark_session) + client.start() mock_spark_session.create_sql_context.assert_called_with() def test_execute_code(): mock_spark_session = MagicMock() client = LivyClient(mock_spark_session) + client.start() command = "command" client.execute(command) @@ -28,6 +35,7 @@ def test_execute_code(): def test_execute_sql(): mock_spark_session = MagicMock() client = LivyClient(mock_spark_session) + client.start() command = "command" client.execute_sql(command) @@ -40,6 +48,7 @@ def test_execute_sql(): def test_execute_hive(): mock_spark_session = MagicMock() client = LivyClient(mock_spark_session) + client.start() command = "command" client.execute_hive(command) @@ -63,6 +72,7 @@ def test_serialize(): session.get_state.return_value = LivySessionState(session_id, connection_string, kind, sql_created) client = LivyClient(session) + client.start() serialized = client.serialize() @@ -77,6 +87,7 @@ def test_serialize(): def test_close_session(): mock_spark_session = MagicMock() client = LivyClient(mock_spark_session) + client.start() client.close_session() @@ -89,6 +100,7 @@ def test_kind(): language_mock = PropertyMock(return_value=kind) type(mock_spark_session).kind = language_mock client = LivyClient(mock_spark_session) + client.start() l = client.kind @@ -101,6 +113,7 @@ def test_session_id(): session_id_mock = PropertyMock(return_value=session_id) type(mock_spark_session).id = 
session_id_mock client = LivyClient(mock_spark_session) + client.start() i = client.session_id diff --git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py index e522404..4aa9a12 100644 --- a/tests/test_sparkcontroller.py +++ b/tests/test_sparkcontroller.py @@ -41,7 +41,7 @@ def test_add_session(): name = "name" properties = {"kind": "spark"} connection_string = "url=http://location:port;username=name;password=word" - client = "client" + client = MagicMock() session = MagicMock() client_factory.create_session = MagicMock(return_value=session) client_factory.build_client = MagicMock(return_value=client) @@ -51,6 +51,7 @@ def test_add_session(): client_factory.create_session.assert_called_once_with(connection_string, properties, "-1", False) client_factory.build_client.assert_called_once_with(session) client_manager.add_client.assert_called_once_with(name, client) + client.start.assert_called_once_with() session.start.assert_called_once_with()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 2 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.1.1 ipython==4.0.0 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 narwhals==1.32.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==6.0.1 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@cb87d63ab3268c1c5bd63c9fc1f1e971d5a1fe31#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.1.1 - ipython==4.0.0 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - narwhals==1.32.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==6.0.1 - pluggy==1.5.0 - 
prometheus-client==0.21.1 - ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_livyclient.py::test_doesnt_create_sql_context_automatically", "tests/test_livyclient.py::test_start_creates_sql_context", "tests/test_livyclient.py::test_execute_code", "tests/test_livyclient.py::test_execute_sql", "tests/test_livyclient.py::test_execute_hive", "tests/test_livyclient.py::test_serialize", "tests/test_livyclient.py::test_close_session", "tests/test_livyclient.py::test_kind", "tests/test_livyclient.py::test_session_id" ]
[ "tests/test_sparkcontroller.py::test_add_session", "tests/test_sparkcontroller.py::test_add_session_skip", "tests/test_sparkcontroller.py::test_delete_session", "tests/test_sparkcontroller.py::test_cleanup", "tests/test_sparkcontroller.py::test_run_cell", "tests/test_sparkcontroller.py::test_get_client_keys", "tests/test_sparkcontroller.py::test_get_all_sessions", "tests/test_sparkcontroller.py::test_cleanup_endpoint", "tests/test_sparkcontroller.py::test_delete_session_by_id_existent", "tests/test_sparkcontroller.py::test_delete_session_by_id_non_existent" ]
[]
[]
Modified BSD License
379
joblib__joblib-297
4a9c63d7984bc5e630722b8bf72c0c720a6bd5c0
2016-01-14 10:30:19
40341615cc2600675ce7457d9128fb030f6f89fa
ogrisel: LGTM, can you please add a changelog entry and merge?
diff --git a/CHANGES.rst b/CHANGES.rst index e08fc89..66531c1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,11 @@ Latest changes Release 0.9.4 ------------- +Loïc Estève + + FIX for raising non inheritable exceptions in a Parallel call. See + https://github.com/joblib/joblib/issues/269 for more details. + Alexandre Abadie FIX joblib.hash error with mixed types sets and dicts containing mixed diff --git a/joblib/my_exceptions.py b/joblib/my_exceptions.py index 500fcd7..9d26cb5 100644 --- a/joblib/my_exceptions.py +++ b/joblib/my_exceptions.py @@ -64,14 +64,20 @@ def _mk_exception(exception, name=None): this_exception = _exception_mapping[this_name] else: if exception is Exception: - # We cannot create a subclass: we are already a trivial - # subclass + # JoblibException is already a subclass of Exception. No + # need to use multiple inheritance return JoblibException, this_name - elif issubclass(exception, JoblibException): - return JoblibException, JoblibException.__name__ - this_exception = type( - this_name, (JoblibException, exception), {}) - _exception_mapping[this_name] = this_exception + try: + this_exception = type( + this_name, (JoblibException, exception), {}) + _exception_mapping[this_name] = this_exception + except TypeError: + # This happens if "Cannot create a consistent method + # resolution order", e.g. because 'exception' is a + # subclass of JoblibException or 'exception' is not an + # acceptable base class + this_exception = JoblibException + return this_exception, this_name
Error propagating exceptions on-non subclassable types I was using joblib to run 8 optimizations using nlopt in parallel, and got this error: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-138-0fe3676d9e18> in <module>() 31 for i in range(iterations): 32 pool = Parallel(n_jobs=max_jobs, verbose=5) ---> 33 out = pool(delayed(nlopt_optimization)(job_id) for job_id in range(max_jobs)) 34 solutions.extend(out) 35 /home/federico/anaconda/lib/python2.7/site-packages/joblib/parallel.pyc in __call__(self, iterable) 658 # consumption. 659 self._iterating = False --> 660 self.retrieve() 661 # Make sure that we get a last message telling us we are done 662 elapsed_time = time.time() - self._start_time /home/federico/anaconda/lib/python2.7/site-packages/joblib/parallel.pyc in retrieve(self) 540 ) 541 # Convert this to a JoblibException --> 542 exception_type = _mk_exception(exception.etype)[0] 543 raise exception_type(report) 544 raise exception /home/federico/anaconda/lib/python2.7/site-packages/joblib/my_exceptions.pyc in _mk_exception(exception, name) 67 this_exception = type(this_name, (exception, JoblibException), 68 dict(__repr__=JoblibException.__repr__, ---> 69 __str__=JoblibException.__str__), 70 ) 71 _exception_mapping[this_name] = this_exception TypeError: type 'nlopt.ForcedStop' is not an acceptable base type ``` My understanding is because a bunch of nlopt types are actually all generated using SWIG, and cannot be naturally inherited. Should there be a check before joblib tries creating a nice custom exception that the type can indeed be inherited from?
joblib/joblib
diff --git a/joblib/test/test_my_exceptions.py b/joblib/test/test_my_exceptions.py index 7c396ca..b283434 100644 --- a/joblib/test/test_my_exceptions.py +++ b/joblib/test/test_my_exceptions.py @@ -42,6 +42,16 @@ def test_inheritance_special_cases(): assert_true(my_exceptions._mk_exception(exception)[0] is my_exceptions.JoblibException) + # Non-inheritable exception classes should be mapped to + # JoblibException by _mk_exception. That can happen with classes + # generated with SWIG. See + # https://github.com/joblib/joblib/issues/269 for a concrete + # example. + non_inheritable_classes = [type(lambda: None), bool] + for exception in non_inheritable_classes: + assert_true(my_exceptions._mk_exception(exception)[0] is + my_exceptions.JoblibException) + def test__mk_exception(): # Check that _mk_exception works on a bunch of different exceptions
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 2 }
0.9
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "coverage", "pytest" ], "pre_install": null, "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 coverage==6.2 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/joblib/joblib.git@4a9c63d7984bc5e630722b8bf72c0c720a6bd5c0#egg=joblib more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work nose==1.3.7 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: joblib channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==6.2 - nose==1.3.7 prefix: /opt/conda/envs/joblib
[ "joblib/test/test_my_exceptions.py::test_inheritance_special_cases" ]
[]
[ "joblib/test/test_my_exceptions.py::test_inheritance", "joblib/test/test_my_exceptions.py::test__mk_exception" ]
[]
BSD 3-Clause "New" or "Revised" License
380
OnroerendErfgoed__pyramid_urireferencer-11
b9c5617a6f21cc081232826cee80fa7b2bf050e2
2016-01-14 15:13:21
067293d191dc9dd4f7c2554f71bf0c730786a872
diff --git a/pyramid_urireferencer/models.py b/pyramid_urireferencer/models.py index 8ef53f0..9bbd245 100644 --- a/pyramid_urireferencer/models.py +++ b/pyramid_urireferencer/models.py @@ -14,12 +14,13 @@ class RegistryResponse: :param int count: How many references were found? :param list applications: A list of application results. ''' + def __init__(self, query_uri, success, has_references, count, applications): - self.query_uri = query_uri - self.success = success - self.has_references = has_references - self.count = count - self.applications = applications + self.query_uri = query_uri + self.success = success + self.has_references = has_references + self.count = count + self.applications = applications @staticmethod def load_from_json(data): @@ -34,9 +35,19 @@ class RegistryResponse: r.success = data['success'] r.has_references = data['has_references'] r.count = data['count'] - r.applications = [ApplicationResponse.load_from_json(a) for a in data['applications']] if data['applications'] is not None else None + r.applications = [ApplicationResponse.load_from_json(a) for a in data['applications']] if data[ + 'applications'] is not None else None return r + def to_json(self): + return { + "query_uri": self.query_uri, + "success": self.success, + "has_references": self.has_references, + "count": self.count, + "applications": [app.to_json() for app in self.applications] + } + class ApplicationResponse: ''' @@ -52,14 +63,15 @@ class ApplicationResponse: :param list items: A list of items that have a reference to the \ uri under survey. Limited to 5 items for performance reasons. 
''' + def __init__(self, title, uri, service_url, success, has_references, count, items): - self.title = title - self.uri = uri - self.service_url = service_url - self.success = success - self.has_references = has_references - self.count = count - self.items = items + self.title = title + self.uri = uri + self.service_url = service_url + self.success = success + self.has_references = has_references + self.count = count + self.items = items @staticmethod def load_from_json(data): @@ -79,6 +91,17 @@ class ApplicationResponse: r.items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else None return r + def to_json(self): + return { + "title": self.title, + "uri": self.uri, + "service_url": self.service_url, + "success": self.success, + "has_references": self.has_references, + "count": self.count, + "items": [item.to_json() for item in self.items] + } + class Item: ''' @@ -87,6 +110,7 @@ class Item: :param string title: Title of the item. :param string uri: Uri of the item. ''' + def __init__(self, title, uri): self.title = title self.uri = uri @@ -103,3 +127,9 @@ class Item: i.uri = data['uri'] i.title = data['title'] return i + + def to_json(self): + return { + "title": self.title, + "uri": self.uri + } diff --git a/pyramid_urireferencer/protected_resources.py b/pyramid_urireferencer/protected_resources.py index 2814254..db4556a 100644 --- a/pyramid_urireferencer/protected_resources.py +++ b/pyramid_urireferencer/protected_resources.py @@ -8,6 +8,7 @@ that might be used in external applications. from pyramid.httpexceptions import ( HTTPInternalServerError, HTTPConflict) +from webob import Response import pyramid_urireferencer @@ -26,19 +27,41 @@ def protected_operation(fn): :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were unable to check that the URI is no longer being used. 
''' + def advice(parent_object, *args, **kw): id = parent_object.request.matchdict['id'] referencer = pyramid_urireferencer.get_referencer(parent_object.request.registry) uri = parent_object.uri_template.format(id) registery_response = referencer.is_referenced(uri) if registery_response.has_references: - raise HTTPConflict( - detail="Urireferencer: The uri {0} is still in use by other applications: {1}". - format(uri, ', '.join([app_response.title for app_response in registery_response.applications - if app_response.has_references]))) + if parent_object.request.headers.get("Accept", None) == "application/json": + response = Response() + response.status_code = 409 + response_json = { + "message": "The uri {0} is still in use by other applications. A total of {1} references have been found.".format( + uri, registery_response.count), + "errors": [], + "registry_response": registery_response.to_json() + } + for app_response in registery_response.applications: + if app_response.has_references: + error_string = "{0}: {1} references found, such as {2}"\ + .format(app_response.uri, + app_response.count, + ', '.join([i.uri for i in app_response.items])) + response_json["errors"].append(error_string) + response.json_body = response_json + response.content_type = 'application/json' + return response + else: + raise HTTPConflict( + detail="Urireferencer: The uri {0} is still in use by other applications. A total of {1} references have been found in the following applications: {2}". 
+ format(uri, registery_response.count, + ', '.join([app_response.title for app_response in registery_response.applications + if app_response.has_references]))) elif not registery_response.success: raise HTTPInternalServerError( - detail="Urireferencer: Something went wrong while retrieving references of the uri {0}".format(uri)) + detail="Urireferencer: Something went wrong while retrieving references of the uri {0}".format(uri)) return fn(parent_object, *args, **kw) - return advice \ No newline at end of file + return advice diff --git a/requirements-dev.txt b/requirements-dev.txt index 22621e2..10814ca 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -7,6 +7,7 @@ pytest-cov==2.1.0 webtest==2.0.18 httpretty==0.8.10 coveralls +mock==1.3.0 #wheel wheel==0.26.0
Error message when a resource can't be deleted. Currently, when the protected resources decorator finds that a resource is still in use, it raise a 409 Conflict with a text string as body. The current error message looks like: *Urireferencer: The uri https://id.erfgoed.net/actoren/1 is still in use by other applications: https://inventaris.onroerenderfgoed.be, https://besluiten.onroerenderfgoed.be* I would like to see this changed to: *The uri https://id.erfgoed.net/actoren/1 is still in use by other applications. A total of 8 references have been found in the following applications: https://inventaris.onroerenderfgoed.be, https://besluiten.onroerenderfgoed.be* I would also like a more custom error message when request.accept = 'application/json' ```json { "message": "The uri https://id.erfgoed.net/actoren/1 is still in use by other applications. A total of 8 references have been found.", "errors": [ "https://inventaris.onroerenderfgoed.be: 6 references found, such as https://id.erfgoed.net/erfgoedobjecten/56, https://id.erfgoed.net/aanduidingsobjecten/889, https://id.erfgoed.net/aanduidingsobjecten/12487, https://id.erfgoed.net/gebeurtenissen/965, https://id.erfgoed.net/themas/123.", "https://besluiten.onroerenderfgoed.be: 2 references found, such as https://id.erfgoed.net/besluiten/1896, https://id.erfgoed.net/besluiten/23." ], "registry_response": <Include the response the registry passed so the client can create custom error messages> } ```
OnroerendErfgoed/pyramid_urireferencer
diff --git a/tests/test_protected_resources.py b/tests/test_protected_resources.py index dc47a81..1989235 100644 --- a/tests/test_protected_resources.py +++ b/tests/test_protected_resources.py @@ -2,23 +2,34 @@ import unittest from pyramid import testing from pyramid_urireferencer.protected_resources import protected_operation -from pyramid_urireferencer.models import RegistryResponse +from pyramid_urireferencer.models import RegistryResponse, Item, ApplicationResponse from pyramid.httpexceptions import HTTPConflict, HTTPInternalServerError + try: from unittest.mock import Mock, patch except ImportError: from mock import Mock, patch # pragma: no cover + def get_app(nr): - class Object(object): - pass - a = Object() - a.title = 'App {0}'.format(nr) - a.has_references = True if nr == 1 else False + items = [] + if nr == 1: + items.append(Item(uri="https://dev-besluiten.onroerenderfgoed.be/besluiten/152", title="Mijn besluit")) + items.append(Item(uri="https://dev-besluiten.onroerenderfgoed.be/besluiten/154", + title="Vaststelling van de inventaris van het Bouwkundig Erfgoed op 28 november 2014")) + a = ApplicationResponse( + title='App {0}'.format(nr), + uri="https://dev-app-{0}.onroerenderfgoed.be/".format(nr), + service_url="https://dev-app-{0}.onroerenderfgoed.be/references".format(nr), + success=True, + has_references=True if nr == 1 else False, + count=2 if nr == 1 else 0, + items=items + ) return a -class DummyParent(object): +class DummyParent(object): def __init__(self): self.request = testing.DummyRequest() config = testing.setUp(request=self.request) @@ -37,7 +48,6 @@ class DummyParent(object): class ProtectedTests(unittest.TestCase): - def setUp(self): pass @@ -52,7 +62,8 @@ class ProtectedTests(unittest.TestCase): @patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced') def test_protected_operation_409(self, is_referenced_mock): dummy = DummyParent() - is_referenced_mock.return_value = 
RegistryResponse('https://id.erfgoed.net/resources/1', True, True, 10, [get_app(1), get_app(2)]) + is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', True, True, 10, + [get_app(1), get_app(2)]) self.assertRaises(HTTPConflict, dummy.protected_dummy) is_referenced_call = is_referenced_mock.mock_calls[0] self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0]) @@ -60,16 +71,32 @@ class ProtectedTests(unittest.TestCase): @patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced') def test_protected_operation_409_2(self, is_referenced_mock): dummy = DummyParent() - is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, True, 10, [get_app(1), get_app(2)]) + is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, True, 10, + [get_app(1), get_app(2)]) self.assertRaises(HTTPConflict, dummy.protected_dummy) is_referenced_call = is_referenced_mock.mock_calls[0] self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0]) + @patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced') + def test_protected_operation_409_json(self, is_referenced_mock): + dummy = DummyParent() + dummy.request.headers = {"Accept": "application/json"} + is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, True, 2, + [get_app(1), get_app(2)]) + res = dummy.protected_dummy() + self.assertEqual(409, res.status_code) + self.assertEqual(res.json_body["message"], + "The uri https://id.erfgoed.net/resources/1 is still in use by other applications. 
A total of 2 references have been found.") + self.assertEqual("application/json", res.content_type) + + is_referenced_call = is_referenced_mock.mock_calls[0] + self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0]) + @patch('pyramid_urireferencer.protected_resources.pyramid_urireferencer.Referencer.is_referenced') def test_protected_operation_500(self, is_referenced_mock): dummy = DummyParent() - is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, None, None, None) + is_referenced_mock.return_value = RegistryResponse('https://id.erfgoed.net/resources/1', False, None, None, + None) self.assertRaises(HTTPInternalServerError, dummy.protected_dummy) is_referenced_call = is_referenced_mock.mock_calls[0] self.assertEqual('https://id.erfgoed.net/resources/1', is_referenced_call[1][0]) -
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
0.4
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest", "pytest-cov", "webtest", "httpretty", "coveralls", "wheel" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
beautifulsoup4==4.13.3 coverage==7.8.0 coveralls==4.0.1 docopt==0.6.2 exceptiongroup==1.2.2 httpretty==1.1.4 iniconfig==2.1.0 packaging==24.2 PasteDeploy==3.1.0 pluggy==1.5.0 pyramid==1.5.7 -e git+https://github.com/OnroerendErfgoed/pyramid_urireferencer.git@b9c5617a6f21cc081232826cee80fa7b2bf050e2#egg=pyramid_urireferencer pytest==8.3.5 pytest-cov==6.0.0 repoze.lru==0.7 requests==2.7.0 soupsieve==2.6 tomli==2.2.1 translationstring==1.4 typing_extensions==4.13.0 venusian==3.1.1 waitress==3.0.2 WebOb==1.8.9 WebTest==3.0.4 zope.deprecation==5.1 zope.interface==7.2
name: pyramid_urireferencer channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - beautifulsoup4==4.13.3 - coverage==7.8.0 - coveralls==4.0.1 - docopt==0.6.2 - exceptiongroup==1.2.2 - httpretty==1.1.4 - iniconfig==2.1.0 - packaging==24.2 - pastedeploy==3.1.0 - pluggy==1.5.0 - pyramid==1.5.7 - pytest==8.3.5 - pytest-cov==6.0.0 - repoze-lru==0.7 - requests==2.7.0 - soupsieve==2.6 - tomli==2.2.1 - translationstring==1.4 - typing-extensions==4.13.0 - venusian==3.1.1 - waitress==3.0.2 - webob==1.8.9 - webtest==3.0.4 - zope-deprecation==5.1 - zope-interface==7.2 prefix: /opt/conda/envs/pyramid_urireferencer
[ "tests/test_protected_resources.py::ProtectedTests::test_protected_operation_409_json" ]
[]
[ "tests/test_protected_resources.py::ProtectedTests::test_protected_operation", "tests/test_protected_resources.py::ProtectedTests::test_protected_operation_409", "tests/test_protected_resources.py::ProtectedTests::test_protected_operation_409_2", "tests/test_protected_resources.py::ProtectedTests::test_protected_operation_500" ]
[]
MIT License
381
jupyter-incubator__sparkmagic-125
ac43b2838efaae766a7071a79699b9b192899dd2
2016-01-16 02:37:09
ac43b2838efaae766a7071a79699b9b192899dd2
diff --git a/remotespark/livyclientlib/livyclient.py b/remotespark/livyclientlib/livyclient.py index eab5eed..6d56323 100644 --- a/remotespark/livyclientlib/livyclient.py +++ b/remotespark/livyclientlib/livyclient.py @@ -22,6 +22,12 @@ class LivyClient(object): def serialize(self): return self._session.get_state().to_dict() + def get_logs(self): + try: + return True, self._session.logs + except ValueError as err: + return False, "{}".format(err) + def execute(self, commands): self._session.wait_for_idle(self._execute_timeout_seconds) return self._session.execute(commands) diff --git a/remotespark/livyclientlib/livysession.py b/remotespark/livyclientlib/livysession.py index 9993ad6..73ddcb3 100644 --- a/remotespark/livyclientlib/livysession.py +++ b/remotespark/livyclientlib/livysession.py @@ -112,16 +112,10 @@ class LivySession(object): def kind(self): return self._state.kind - def refresh_status(self): - (status, logs) = self._get_latest_status_and_logs() - - if status in Constants.possible_session_status: - self._status = status - self._logs = logs - else: - raise ValueError("Status '{}' not supported by session.".format(status)) - - return self._status + @property + def logs(self): + self._refresh_logs() + return self._logs @property def http_client(self): @@ -158,14 +152,14 @@ class LivySession(object): Parameters: seconds_to_wait : number of seconds to wait before giving up. """ - self.refresh_status() + self._refresh_status() current_status = self._status if current_status == Constants.idle_session_status: return if current_status in Constants.final_status: error = "Session {} unexpectedly reached final status {}. 
See logs:\n{}"\ - .format(self.id, current_status, "\n".join(self._logs)) + .format(self.id, current_status, "\n".join(self.logs)) self.logger.error(error) raise LivyUnexpectedStatusError(error) @@ -185,18 +179,31 @@ class LivySession(object): def _statements_url(self): return "/sessions/{}/statements".format(self.id) - def _get_latest_status_and_logs(self): - """Get current session state. Network call.""" - r = self._http_client.get("/sessions", [200]) - sessions = r.json()["sessions"] - filtered_sessions = [s for s in sessions if s["id"] == int(self.id)] + def _refresh_status(self): + status = self._get_latest_status() + + if status in Constants.possible_session_status: + self._status = status + else: + raise ValueError("Status '{}' not supported by session.".format(status)) + + return self._status + + def _refresh_logs(self): + self._logs = self._get_latest_logs() + + def _get_latest_status(self): + r = self._http_client.get("/sessions/{}".format(self.id), [200]) + session = r.json() - if len(filtered_sessions) != 1: - raise ValueError("Expected one session of id {} and got {} sessions." 
- .format(self.id, len(filtered_sessions))) - - session = filtered_sessions[0] - return session['state'], session['log'] + return session['state'] + + def _get_latest_logs(self): + r = self._http_client.get("/sessions/{}/log?from=0".format(self.id), [200]) + log_array = r.json()['log'] + logs = "\n".join(log_array) + + return logs def _get_statement_output(self, statement_id): statement_running = True diff --git a/remotespark/livyclientlib/sparkcontroller.py b/remotespark/livyclientlib/sparkcontroller.py index 75b4854..df0924a 100644 --- a/remotespark/livyclientlib/sparkcontroller.py +++ b/remotespark/livyclientlib/sparkcontroller.py @@ -21,6 +21,10 @@ class SparkController(object): else: self.client_manager = ClientManager() + def get_logs(self, client_name=None): + client_to_use = self.get_client_by_name_or_default(client_name) + return client_to_use.get_logs() + def run_cell(self, cell, client_name=None): client_to_use = self.get_client_by_name_or_default(client_name) return client_to_use.execute(cell) @@ -40,7 +44,7 @@ class SparkController(object): session_list = [self.client_factory.create_session(self.ipython_display, connection_string, {"kind": s["kind"]}, s["id"]) for s in sessions] for s in session_list: - s.refresh_status() + s._refresh_status() return session_list def get_all_sessions_endpoint_info(self, connection_string): diff --git a/remotespark/remotesparkmagics.py b/remotespark/remotesparkmagics.py index 1a9687b..c763f16 100644 --- a/remotespark/remotesparkmagics.py +++ b/remotespark/remotesparkmagics.py @@ -39,7 +39,7 @@ class RemoteSparkMagics(Magics): self.logger.debug("Will serialize to {}.".format(path_to_serialize)) - self.spark_controller = SparkController(serialize_path=path_to_serialize) + self.spark_controller = SparkController(self.ipython_display, serialize_path=path_to_serialize) else: self.logger.debug("Serialization NOT enabled.") except KeyError: @@ -69,6 +69,7 @@ class RemoteSparkMagics(Magics): When the SQL context is used, the 
result will be a Pandas dataframe of a sample of the results. If invoked with no subcommand, the cell will be executed against the specified session. + Subcommands ----------- info @@ -89,11 +90,14 @@ class RemoteSparkMagics(Magics): e.g. `%%spark config {"driverMemory":"1000M", "executorCores":4}` run Run Spark code against a session. - e.g. `%%spark -e testsession` will execute the cell code against the testsession previously created - e.g. `%%spark -e testsession -c sql` will execute the SQL code against the testsession previously created - e.g. `%%spark -e testsession -c sql -o my_var` will execute the SQL code against the testsession + e.g. `%%spark -s testsession` will execute the cell code against the testsession previously created + e.g. `%%spark -s testsession -c sql` will execute the SQL code against the testsession previously created + e.g. `%%spark -s testsession -c sql -o my_var` will execute the SQL code against the testsession previously created and store the pandas dataframe created in the my_var variable in the Python environment. + logs + Returns the logs for a given session. + e.g. `%%spark logs -s testsession` will return the logs for the testsession previously created delete Delete a Livy session. Argument is the name of the session to be deleted. e.g. `%%spark delete defaultlivy` @@ -107,81 +111,94 @@ class RemoteSparkMagics(Magics): subcommand = args.command[0].lower() - # info - if subcommand == "info": - if len(args.command) == 2: - connection_string = args.command[1] - info_sessions = self.spark_controller.get_all_sessions_endpoint_info(connection_string) - self._print_endpoint_info(info_sessions) - elif len(args.command) == 1: - self._print_local_info() - else: - raise ValueError("Subcommand 'info' requires no value or a connection string to show all sessions. " - "{}".format(usage)) - # config - elif subcommand == "config": - # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes... 
- rest_of_line = user_input[7:] - conf.override(conf.session_configs.__name__, json.loads(rest_of_line)) - # add - elif subcommand == "add": - if len(args.command) != 4 and len(args.command) != 5: - raise ValueError("Subcommand 'add' requires three or four arguments. {}".format(usage)) - - name = args.command[1].lower() - language = args.command[2].lower() - connection_string = args.command[3] - - if len(args.command) == 5: - skip = args.command[4].lower() == "skip" - else: - skip = False - - properties = copy.deepcopy(conf.session_configs()) - properties["kind"] = self._get_livy_kind(language) + try: + # info + if subcommand == "info": + if len(args.command) == 2: + connection_string = args.command[1] + info_sessions = self.spark_controller.get_all_sessions_endpoint_info(connection_string) + self._print_endpoint_info(info_sessions) + elif len(args.command) == 1: + self._print_local_info() + else: + raise ValueError("Subcommand 'info' requires no value or a connection string to show all sessions.\n" + "{}".format(usage)) + # config + elif subcommand == "config": + # Would normally do " ".join(args.command[1:]) but parse_argstring removes quotes... + rest_of_line = user_input[7:] + conf.override(conf.session_configs.__name__, json.loads(rest_of_line)) + # add + elif subcommand == "add": + if len(args.command) != 4 and len(args.command) != 5: + raise ValueError("Subcommand 'add' requires three or four arguments.\n{}".format(usage)) - self.spark_controller.add_session(name, connection_string, skip, properties) - # delete - elif subcommand == "delete": - if len(args.command) == 2: name = args.command[1].lower() - self.spark_controller.delete_session_by_name(name) - elif len(args.command) == 3: - connection_string = args.command[1] - session_id = args.command[2] - self.spark_controller.delete_session_by_id(connection_string, session_id) - else: - raise ValueError("Subcommand 'delete' requires a session name, or a connection string and id. 
{}" - .format(usage)) - # cleanup - elif subcommand == "cleanup": - if len(args.command) == 2: - connection_string = args.command[1] - self.spark_controller.cleanup_endpoint(connection_string) - elif len(args.command) == 1: - self.spark_controller.cleanup() - else: - raise ValueError("Subcommand 'cleanup' requires no value or a connection string to clean up sessions. " - "{}".format(usage)) - # run - elif len(subcommand) == 0: - if args.context == Constants.context_name_spark: - (success, out) = self.spark_controller.run_cell(cell, args.session) - if success: - self.ipython_display.write(out) + language = args.command[2].lower() + connection_string = args.command[3] + + if len(args.command) == 5: + skip = args.command[4].lower() == "skip" + else: + skip = False + + properties = copy.deepcopy(conf.session_configs()) + properties["kind"] = self._get_livy_kind(language) + + self.spark_controller.add_session(name, connection_string, skip, properties) + # delete + elif subcommand == "delete": + if len(args.command) == 2: + name = args.command[1].lower() + self.spark_controller.delete_session_by_name(name) + elif len(args.command) == 3: + connection_string = args.command[1] + session_id = args.command[2] + self.spark_controller.delete_session_by_id(connection_string, session_id) + else: + raise ValueError("Subcommand 'delete' requires a session name or a connection string and id.\n{}" + .format(usage)) + # cleanup + elif subcommand == "cleanup": + if len(args.command) == 2: + connection_string = args.command[1] + self.spark_controller.cleanup_endpoint(connection_string) + elif len(args.command) == 1: + self.spark_controller.cleanup() else: - self.ipython_display.send_error(out) - elif args.context == Constants.context_name_sql: - return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_sql, cell, - args.session, args.output) - elif args.context == Constants.context_name_hive: - return 
self._execute_against_context_that_returns_df(self.spark_controller.run_cell_hive, cell, - args.session, args.output) + raise ValueError("Subcommand 'cleanup' requires no further values or a connection string to clean up " + "sessions.\n{}".format(usage)) + # logs + elif subcommand == "logs": + if len(args.command) == 1: + (success, out) = self.spark_controller.get_logs(args.session) + if success: + self.ipython_display.write(out) + else: + self.ipython_display.send_error(out) + else: + raise ValueError("Subcommand 'logs' requires no further values.\n{}".format(usage)) + # run + elif len(subcommand) == 0: + if args.context == Constants.context_name_spark: + (success, out) = self.spark_controller.run_cell(cell, args.session) + if success: + self.ipython_display.write(out) + else: + self.ipython_display.send_error(out) + elif args.context == Constants.context_name_sql: + return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_sql, cell, + args.session, args.output) + elif args.context == Constants.context_name_hive: + return self._execute_against_context_that_returns_df(self.spark_controller.run_cell_hive, cell, + args.session, args.output) + else: + raise ValueError("Context '{}' not found".format(args.context)) + # error else: - raise ValueError("Context '{}' not found".format(args.context)) - # error - else: - raise ValueError("Subcommand '{}' not found. {}".format(subcommand, usage)) + raise ValueError("Subcommand '{}' not found. 
{}".format(subcommand, usage)) + except ValueError as err: + self.ipython_display.send_error("{}".format(err)) def _execute_against_context_that_returns_df(self, method, cell, session, output_var): try: diff --git a/remotespark/sparkkernelbase.py b/remotespark/sparkkernelbase.py index 626b74c..ab3dd5e 100644 --- a/remotespark/sparkkernelbase.py +++ b/remotespark/sparkkernelbase.py @@ -17,6 +17,7 @@ class SparkKernelBase(IPythonKernel): info_command = "info" delete_command = "delete" clean_up_command = "cleanup" + logs_command = "logs" force_flag = "f" @@ -78,10 +79,10 @@ class SparkKernelBase(IPythonKernel): if self._session_started: if self.force_flag not in flags: self._show_user_error("A session has already been started. In order to modify the Spark configura" - "tion, please provide the '-f' flag at the beginning of the config magic:\n" - "\te.g. `%config -f {}`\n\nNote that this will kill the current session and" - " will create a new one with the configuration provided. All previously run " - "commands in the session will be lost.") + "tion, please provide the '-f' flag at the beginning of the config magic:\n" + "\te.g. `%config -f {}`\n\nNote that this will kill the current session and" + " will create a new one with the configuration provided. 
All previously run " + "commands in the session will be lost.") code_to_run = "" else: restart_session = True @@ -118,6 +119,12 @@ class SparkKernelBase(IPythonKernel): code_to_run = "%spark cleanup {}".format(self.connection_string) return self._run_without_session(code_to_run, silent, store_history, user_expressions, allow_stdin) + elif subcommand == self.logs_command: + if self._session_started: + code_to_run = "%spark logs" + else: + code_to_run = "print('No logs yet.')" + return self._execute_cell(code_to_run, silent, store_history, user_expressions, allow_stdin) else: self._show_user_error("Magic '{}' not supported.".format(subcommand)) return self._run_without_session("", silent, store_history, user_expressions, allow_stdin) @@ -145,7 +152,6 @@ ip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.fra def _start_session(self): if not self._session_started: self._session_started = True - self._ipython_display.writeln('Starting Livy Session') add_session_code = "%spark add {} {} {} skip".format( self.client_name, self.session_language, self.connection_string) @@ -266,4 +272,4 @@ ip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.fra error = conf.fatal_error_suggestion().format(self._fatal_error) self._logger.error(error) self._ipython_display.send_error(error) - raise ValueError(self._fatal_error) \ No newline at end of file + raise ValueError(self._fatal_error)
Provide %logs for session and in wrapper kernels This should query `/sessions/ID/log?from=0` and return output nicely formatted.
jupyter-incubator/sparkmagic
diff --git a/tests/test_livyclient.py b/tests/test_livyclient.py index 1a2931a..b0d89c2 100644 --- a/tests/test_livyclient.py +++ b/tests/test_livyclient.py @@ -118,3 +118,27 @@ def test_session_id(): i = client.session_id assert i == session_id + + +def test_get_logs_returns_session_logs(): + logs = "hi" + mock_spark_session = MagicMock() + mock_spark_session.logs = logs + client = LivyClient(mock_spark_session) + + res, logs_r = client.get_logs() + + assert res + assert logs_r == logs + + +def test_get_logs_returns_false_with_value_error(): + err = "err" + mock_spark_session = MagicMock() + type(mock_spark_session).logs = PropertyMock(side_effect=ValueError(err)) + client = LivyClient(mock_spark_session) + + res, logs_r = client.get_logs() + + assert not res + assert logs_r == err diff --git a/tests/test_livysession.py b/tests/test_livysession.py index 72d3f58..3e576b3 100644 --- a/tests/test_livysession.py +++ b/tests/test_livysession.py @@ -31,14 +31,14 @@ class TestLivySession: self.pi_result = "Pi is roughly 3.14336" self.session_create_json = '{"id":0,"state":"starting","kind":"spark","log":[]}' - self.ready_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"idle","kind":"spark","log":[""]}]}' - self.error_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"error","kind":"spark","log":' \ - '[""]}]}' - self.busy_sessions_json = '{"from":0,"total":1,"sessions":[{"id":0,"state":"busy","kind":"spark","log":[""]}]}' + self.ready_sessions_json = '{"id":0,"state":"idle","kind":"spark","log":[""]}' + self.error_sessions_json = '{"id":0,"state":"error","kind":"spark","log":[""]}' + self.busy_sessions_json = '{"id":0,"state":"busy","kind":"spark","log":[""]}' self.post_statement_json = '{"id":0,"state":"running","output":null}' self.running_statement_json = '{"total_statements":1,"statements":[{"id":0,"state":"running","output":null}]}' self.ready_statement_json = 
'{"total_statements":1,"statements":[{"id":0,"state":"available","output":{"statu' \ 's":"ok","execution_count":0,"data":{"text/plain":"Pi is roughly 3.14336"}}}]}' + self.log_json = '{"id":6,"from":0,"total":212,"log":["hi","hi"]}' self.get_responses = [] self.post_responses = [] @@ -217,7 +217,7 @@ class TestLivySession: http_client.post.assert_called_with( "/sessions", [201], properties) - def test_status_gets_latest(self): + def test_status_gets_latest_status(self): http_client = MagicMock() http_client.post.return_value = DummyResponse(201, self.session_create_json) http_client.get.return_value = DummyResponse(200, self.ready_sessions_json) @@ -229,11 +229,28 @@ class TestLivySession: conf.load() session.start() - session.refresh_status() + session._refresh_status() state = session._status assert_equals("idle", state) - http_client.get.assert_called_with("/sessions", [200]) + http_client.get.assert_called_with("/sessions/0", [200]) + + def test_logs_gets_latest_logs(self): + http_client = MagicMock() + http_client.post.return_value = DummyResponse(201, self.session_create_json) + http_client.get.return_value = DummyResponse(200, self.log_json) + conf.override_all({ + "status_sleep_seconds": 0.01, + "statement_sleep_seconds": 0.01 + }) + session = self._create_session(http_client=http_client) + conf.load() + session.start() + + logs = session.logs + + assert_equals("hi\nhi", logs) + http_client.get.assert_called_with("/sessions/0/log?from=0", [200]) def test_wait_for_idle_returns_when_in_state(self): http_client = MagicMock() @@ -253,7 +270,7 @@ class TestLivySession: session.wait_for_idle(30) - http_client.get.assert_called_with("/sessions", [200]) + http_client.get.assert_called_with("/sessions/0", [200]) assert_equals(2, http_client.get.call_count) @raises(LivyUnexpectedStatusError) @@ -262,7 +279,8 @@ class TestLivySession: http_client.post.return_value = DummyResponse(201, self.session_create_json) self.get_responses = [DummyResponse(200, 
self.busy_sessions_json), DummyResponse(200, self.busy_sessions_json), - DummyResponse(200, self.error_sessions_json)] + DummyResponse(200, self.error_sessions_json), + DummyResponse(200, self.log_json)] http_client.get.side_effect = self._next_response_get conf.override_all({ diff --git a/tests/test_remotesparkmagics.py b/tests/test_remotesparkmagics.py index e87e52f..ac9b96c 100644 --- a/tests/test_remotesparkmagics.py +++ b/tests/test_remotesparkmagics.py @@ -150,13 +150,15 @@ def test_cleanup_endpoint_command_parses(): mock_method.assert_called_once_with("conn_str") -@raises(ValueError) @with_setup(_setup, _teardown) -def test_bad_command_throws_exception(): +def test_bad_command_writes_error(): line = "bad_command" + usage = "Please look at usage of %spark by executing `%spark?`." magic.spark(line) + ipython_display.send_error.assert_called_once_with("Subcommand '{}' not found. {}".format(line, usage)) + @with_setup(_setup, _teardown) def test_run_cell_command_parses(): @@ -302,3 +304,33 @@ def test_run_sql_command_stores_variable_in_user_ns(): def test_get_livy_kind_covers_all_langs(): for lang in Constants.lang_supported: RemoteSparkMagics._get_livy_kind(lang) + + +@with_setup(_setup, _teardown) +def test_logs_subcommand(): + get_logs_method = MagicMock() + result_value = "" + get_logs_method.return_value = (True, result_value) + spark_controller.get_logs = get_logs_method + + command = "logs -s" + name = "sessions_name" + line = " ".join([command, name]) + cell = "cell code" + + # Could get results + result = magic.spark(line, cell) + + get_logs_method.assert_called_once_with(name) + assert result is None + ipython_display.write.assert_called_once_with(result_value) + + # Could not get results + get_logs_method.reset_mock() + get_logs_method.return_value = (False, result_value) + + result = magic.spark(line, cell) + + get_logs_method.assert_called_once_with(name) + assert result is None + ipython_display.send_error.assert_called_once_with(result_value) diff 
--git a/tests/test_sparkcontroller.py b/tests/test_sparkcontroller.py index c1c97b8..3a8a6ae 100644 --- a/tests/test_sparkcontroller.py +++ b/tests/test_sparkcontroller.py @@ -179,3 +179,13 @@ def test_delete_session_by_id_non_existent(): assert len(create_session_method.mock_calls) == 0 assert len(session.delete.mock_calls) == 0 + + +@with_setup(_setup, _teardown) +def test_get_logs(): + chosen_client = MagicMock() + controller.get_client_by_name_or_default = MagicMock(return_value=chosen_client) + + controller.get_logs() + + chosen_client.get_logs.assert_called_with() diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py index 02628f3..6bef40b 100644 --- a/tests/test_sparkkernelbase.py +++ b/tests/test_sparkkernelbase.py @@ -388,3 +388,21 @@ def test_register_auto_viz(): assert call("from remotespark.datawidgets.utils import display_dataframe\nip = get_ipython()\nip.display_formatter" ".ipython_display_formatter.for_type_by_name('pandas.core.frame', 'DataFrame', display_dataframe)", True, False, None, False) in execute_cell_mock.mock_calls + + +@with_setup(_setup(), _teardown()) +def test_logs_magic(): + kernel._session_started = True + + kernel.do_execute("%logs", False) + + assert call("%spark logs", False, True, None, False) in execute_cell_mock.mock_calls + + +@with_setup(_setup(), _teardown()) +def test_logs_magic_prints_without_session(): + kernel._session_started = False + + kernel.do_execute("%logs", False) + + assert call("print('No logs yet.')", False, True, None, False) in execute_cell_mock.mock_calls
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 5 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -r requirements.txt -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==4.9.0 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 arrow==1.3.0 async-lru==2.0.5 attrs==25.3.0 babel==2.17.0 beautifulsoup4==4.13.3 bleach==6.2.0 certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 comm==0.2.2 decorator==5.2.1 defusedxml==0.7.1 exceptiongroup==1.2.2 fastjsonschema==2.21.1 fqdn==1.5.1 h11==0.14.0 httpcore==1.0.7 httpx==0.28.1 idna==3.10 importlib_metadata==8.6.1 iniconfig==2.1.0 ipykernel==4.2.2 ipython==4.0.2 ipython-genutils==0.2.0 ipywidgets==7.8.5 isoduration==20.11.0 Jinja2==3.1.6 json5==0.10.0 jsonpointer==3.0.0 jsonschema==4.23.0 jsonschema-specifications==2024.10.1 jupyter-events==0.12.0 jupyter-lsp==2.2.5 jupyter_client==8.6.3 jupyter_core==5.7.2 jupyter_server==2.15.0 jupyter_server_terminals==0.5.3 jupyterlab==4.1.5 jupyterlab_pygments==0.3.0 jupyterlab_server==2.27.3 jupyterlab_widgets==1.1.11 MarkupSafe==3.0.2 mistune==3.1.3 mock==5.2.0 nbclient==0.10.2 nbconvert==7.16.6 nbformat==5.10.4 nose==1.3.7 notebook==7.1.3 notebook_shim==0.2.4 numpy==2.0.2 overrides==7.7.0 packaging==24.2 pandas==2.2.3 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 platformdirs==4.3.7 plotly==1.9.4 pluggy==1.5.0 prometheus_client==0.21.1 ptyprocess==0.7.0 pycparser==2.22 Pygments==2.19.1 pytest==8.3.5 python-dateutil==2.9.0.post0 python-json-logger==3.3.0 pytz==2025.2 PyYAML==6.0.2 pyzmq==26.3.0 referencing==0.36.2 -e git+https://github.com/jupyter-incubator/sparkmagic.git@ac43b2838efaae766a7071a79699b9b192899dd2#egg=remotespark requests==2.32.3 rfc3339-validator==0.1.4 rfc3986-validator==0.1.1 rpds-py==0.24.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.6 terminado==0.18.1 tinycss2==1.4.0 tomli==2.2.1 tornado==6.4.2 traitlets==5.14.3 types-python-dateutil==2.9.0.20241206 typing_extensions==4.13.0 tzdata==2025.2 uri-template==1.3.0 urllib3==2.3.0 webcolors==24.11.1 webencodings==0.5.1 websocket-client==1.8.0 widgetsnbextension==3.6.10 zipp==3.21.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==4.9.0 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - arrow==1.3.0 - async-lru==2.0.5 - attrs==25.3.0 - babel==2.17.0 - beautifulsoup4==4.13.3 - bleach==6.2.0 - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - comm==0.2.2 - decorator==5.2.1 - defusedxml==0.7.1 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - fqdn==1.5.1 - h11==0.14.0 - httpcore==1.0.7 - httpx==0.28.1 - idna==3.10 - importlib-metadata==8.6.1 - iniconfig==2.1.0 - ipykernel==4.2.2 - ipython==4.0.2 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - isoduration==20.11.0 - jinja2==3.1.6 - json5==0.10.0 - jsonpointer==3.0.0 - jsonschema==4.23.0 - jsonschema-specifications==2024.10.1 - jupyter-client==8.6.3 - jupyter-core==5.7.2 - jupyter-events==0.12.0 - jupyter-lsp==2.2.5 - jupyter-server==2.15.0 - jupyter-server-terminals==0.5.3 - jupyterlab==4.1.5 - jupyterlab-pygments==0.3.0 - jupyterlab-server==2.27.3 - jupyterlab-widgets==1.1.11 - markupsafe==3.0.2 - mistune==3.1.3 - mock==5.2.0 - nbclient==0.10.2 - nbconvert==7.16.6 - nbformat==5.10.4 - nose==1.3.7 - notebook==7.1.3 - notebook-shim==0.2.4 - numpy==2.0.2 - overrides==7.7.0 - packaging==24.2 - pandas==2.2.3 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - platformdirs==4.3.7 - plotly==1.9.4 - pluggy==1.5.0 - prometheus-client==0.21.1 - 
ptyprocess==0.7.0 - pycparser==2.22 - pygments==2.19.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - python-json-logger==3.3.0 - pytz==2025.2 - pyyaml==6.0.2 - pyzmq==26.3.0 - referencing==0.36.2 - requests==2.32.3 - rfc3339-validator==0.1.4 - rfc3986-validator==0.1.1 - rpds-py==0.24.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 - soupsieve==2.6 - terminado==0.18.1 - tinycss2==1.4.0 - tomli==2.2.1 - tornado==6.4.2 - traitlets==5.14.3 - types-python-dateutil==2.9.0.20241206 - typing-extensions==4.13.0 - tzdata==2025.2 - uri-template==1.3.0 - urllib3==2.3.0 - webcolors==24.11.1 - webencodings==0.5.1 - websocket-client==1.8.0 - widgetsnbextension==3.6.10 - zipp==3.21.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_livyclient.py::test_get_logs_returns_session_logs", "tests/test_livyclient.py::test_get_logs_returns_false_with_value_error" ]
[ "tests/test_remotesparkmagics.py::test_info_command_parses", "tests/test_remotesparkmagics.py::test_info_endpoint_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_parses", "tests/test_remotesparkmagics.py::test_add_sessions_command_extra_properties", "tests/test_remotesparkmagics.py::test_delete_sessions_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_command_parses", "tests/test_remotesparkmagics.py::test_cleanup_endpoint_command_parses", "tests/test_remotesparkmagics.py::test_bad_command_writes_error", "tests/test_remotesparkmagics.py::test_run_cell_command_parses", "tests/test_remotesparkmagics.py::test_run_cell_command_writes_to_err", "tests/test_remotesparkmagics.py::test_run_sql_command_parses", "tests/test_remotesparkmagics.py::test_run_hive_command_parses", "tests/test_remotesparkmagics.py::test_run_sql_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_hive_command_returns_none_when_exception", "tests/test_remotesparkmagics.py::test_run_sql_command_stores_variable_in_user_ns", "tests/test_remotesparkmagics.py::test_logs_subcommand", "tests/test_sparkcontroller.py::test_add_session", "tests/test_sparkcontroller.py::test_add_session_skip", "tests/test_sparkcontroller.py::test_delete_session", "tests/test_sparkcontroller.py::test_cleanup", "tests/test_sparkcontroller.py::test_run_cell", "tests/test_sparkcontroller.py::test_get_client_keys", "tests/test_sparkcontroller.py::test_get_all_sessions", "tests/test_sparkcontroller.py::test_cleanup_endpoint", "tests/test_sparkcontroller.py::test_delete_session_by_id_existent", "tests/test_sparkcontroller.py::test_delete_session_by_id_non_existent", "tests/test_sparkcontroller.py::test_get_logs", "tests/test_sparkkernelbase.py::test_set_config", "tests/test_sparkkernelbase.py::test_do_execute_initializes_magics_if_not_run", "tests/test_sparkkernelbase.py::test_magic_not_supported", "tests/test_sparkkernelbase.py::test_info", 
"tests/test_sparkkernelbase.py::test_delete_force", "tests/test_sparkkernelbase.py::test_delete_not_force", "tests/test_sparkkernelbase.py::test_cleanup_force", "tests/test_sparkkernelbase.py::test_cleanup_not_force", "tests/test_sparkkernelbase.py::test_call_spark", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution", "tests/test_sparkkernelbase.py::test_call_spark_sql_new_line", "tests/test_sparkkernelbase.py::test_call_spark_hive_new_line", "tests/test_sparkkernelbase.py::test_register_auto_viz", "tests/test_sparkkernelbase.py::test_logs_magic", "tests/test_sparkkernelbase.py::test_logs_magic_prints_without_session" ]
[ "tests/test_livyclient.py::test_doesnt_create_sql_context_automatically", "tests/test_livyclient.py::test_start_creates_sql_context", "tests/test_livyclient.py::test_execute_code", "tests/test_livyclient.py::test_execute_sql", "tests/test_livyclient.py::test_execute_hive", "tests/test_livyclient.py::test_serialize", "tests/test_livyclient.py::test_close_session", "tests/test_livyclient.py::test_kind", "tests/test_livyclient.py::test_session_id", "tests/test_remotesparkmagics.py::test_get_livy_kind_covers_all_langs", "tests/test_sparkkernelbase.py::test_get_config", "tests/test_sparkkernelbase.py::test_get_config_not_set", "tests/test_sparkkernelbase.py::test_initialize_magics", "tests/test_sparkkernelbase.py::test_start_session", "tests/test_sparkkernelbase.py::test_delete_session", "tests/test_sparkkernelbase.py::test_shutdown_cleans_up" ]
[]
Modified BSD License
382
joshvillbrandt__wireless-9
03fe4987e50aae45132ad3f4c2f6cb3ff2263adc
2016-01-17 14:02:20
7d62e873cb2c69185494a2ba037a9e1cc3a74e6f
diff --git a/wireless/Wireless.py b/wireless/Wireless.py index d9bb653..389dee9 100644 --- a/wireless/Wireless.py +++ b/wireless/Wireless.py @@ -8,7 +8,8 @@ from time import sleep def cmd(cmd): return subprocess.Popen( cmd, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read() + stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ).stdout.read().decode() # abstracts away wireless connection
Python3 support Traceback (most recent call last): File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/rq/worker.py", line 568, in perform_job rv = job.perform() File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/rq/job.py", line 495, in perform self._result = self.func(*self.args, **self.kwargs) File "./smoothie/plugins/interfaces.py", line 28, in interfaces return str(Interfaces()) File "./smoothie/plugins/__init__.py", line 37, in __init__ self.run() File "./smoothie/plugins/__init__.py", line 78, in run self.callback() File "./smoothie/plugins/interfaces.py", line 20, in callback ifaces = [a for a in Wireless().interfaces() File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/wireless/Wireless.py", line 21, in __init__ self._driver_name = self._detectDriver() File "/home/xayon/.virtualenvs/smoothie/lib/python3.4/site-packages/wireless/Wireless.py", line 47, in _detectDriver if len(response) > 0 and 'not found' not in response: TypeError: 'str' does not support the buffer interface
joshvillbrandt/wireless
diff --git a/tests/TestWireless.py b/tests/TestWireless.py index 0ca7ec0..941d8d3 100644 --- a/tests/TestWireless.py +++ b/tests/TestWireless.py @@ -2,6 +2,7 @@ import unittest from wireless import Wireless +from wireless.Wireless import cmd class TestWireless(unittest.TestCase): @@ -12,3 +13,26 @@ class TestWireless(unittest.TestCase): def test_import(self): # if this module loads, then the import worked... self.assertTrue(hasattr(Wireless, 'connect')) + + +class TestCMD(unittest.TestCase): + """ + Tests against cmd function. + """ + def setUp(self): + self.com = cmd('echo "test_ok"') + self.empty_com = cmd('echo -n') + + def test_cmdcomparission(self): + """ + Check if we can test against the output of + the current test command + """ + self.assertTrue('test_ok' in self.com) + + def test_cmdlen(self): + """ + Check if the output is > 0 chars. + """ + self.assertFalse(len(self.empty_com) > 0) + self.assertTrue(len(self.com) > 0)
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 1 }, "num_modified_files": 1 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "nose", "flake8", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y pandoc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 flake8==7.2.0 iniconfig==2.1.0 mccabe==0.7.0 nose==1.3.7 packaging==24.2 pluggy==1.5.0 pycodestyle==2.13.0 pyflakes==3.3.1 pytest==8.3.5 tomli==2.2.1 -e git+https://github.com/joshvillbrandt/wireless.git@03fe4987e50aae45132ad3f4c2f6cb3ff2263adc#egg=wireless
name: wireless channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - flake8==7.2.0 - iniconfig==2.1.0 - mccabe==0.7.0 - nose==1.3.7 - packaging==24.2 - pluggy==1.5.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 - pytest==8.3.5 - tomli==2.2.1 prefix: /opt/conda/envs/wireless
[ "tests/TestWireless.py::TestCMD::test_cmdcomparission" ]
[]
[ "tests/TestWireless.py::TestWireless::test_import", "tests/TestWireless.py::TestCMD::test_cmdlen" ]
[]
Apache License 2.0
383
sympy__sympy-10412
8b01598119b063766667cfdb643452680a5356cd
2016-01-18 01:22:30
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
smichr: Confirmed with the reference added: ```python >>> from time import time >>> t=time();pi_hex_digits(10**2-10 + 1, 10), time()-t ('e90c6cc0ac', 0.0) >>> t=time();pi_hex_digits(10**4-10 + 1, 10), time()-t ('26aab49ec6', 0.17100000381469727) >>> t=time();pi_hex_digits(10**5-10 + 1, 10), time()-t ('a22673c1a5', 4.7109999656677246) >>> t=time();pi_hex_digits(10**6-10 + 1, 10), time()-t ('9ffd342362', 59.985999822616577) >>> t=time();pi_hex_digits(10**7-10 + 1, 10), time()-t ('c1a42e06a1', 689.51800012588501) ```
diff --git a/sympy/ntheory/bbp_pi.py b/sympy/ntheory/bbp_pi.py index 993360675a..38832e0dbb 100644 --- a/sympy/ntheory/bbp_pi.py +++ b/sympy/ntheory/bbp_pi.py @@ -23,59 +23,99 @@ # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Modifications: -1.Once the nth digit is selected the number of digits of working -precision is calculated to ensure that the 14 Hexadecimal representation -of that region is accurate. This was found empirically to be -int((math.log10(n//1000))+18). This was found by searching for a value -of working precision for the n = 0 and n = 1 then n was increased until -the result was less precise, therefore increased again this was repeated -for increasing n and an effective fit was found between n and the -working precision value. - -2. The while loop to evaluate whether the series has converged has be -replaced with a fixed for loop, that option was selected because in a -very large number of cases the loop converged to a point where no -difference can be detected in less than 15 iterations. (done for more -accurate memory and time banking). - -3. output hex string constrained to 14 characters (accuracy assured to be -n = 10**7) + +1.Once the nth digit and desired number of digits is selected, the +number of digits of working precision is calculated to ensure that +the hexadecimal digits returned are accurate. This is calculated as + + int(math.log(start + prec)/math.log(16) + prec + 3) + --------------------------------------- -------- + / / + number of hex digits additional digits + +This was checked by the following code which completed without +errors (and dig are the digits included in the test_bbp.py file): + + for i in range(0,1000): + for j in range(1,1000): + a, b = pi_hex_digits(i, j), dig[i:i+j] + if a != b: + print('%s\n%s'%(a,b)) + +Deceasing the additional digits by 1 generated errors, so '3' is +the smallest additional precision needed to calculate the above +loop without errors. 
The following trailing 10 digits were also +checked to be accurate (and the times were slightly faster with +some of the constant modifications that were made): + + >> from time import time + >> t=time();pi_hex_digits(10**2-10 + 1, 10), time()-t + ('e90c6cc0ac', 0.0) + >> t=time();pi_hex_digits(10**4-10 + 1, 10), time()-t + ('26aab49ec6', 0.17100000381469727) + >> t=time();pi_hex_digits(10**5-10 + 1, 10), time()-t + ('a22673c1a5', 4.7109999656677246) + >> t=time();pi_hex_digits(10**6-10 + 1, 10), time()-t + ('9ffd342362', 59.985999822616577) + >> t=time();pi_hex_digits(10**7-10 + 1, 10), time()-t + ('c1a42e06a1', 689.51800012588501) + +2. The while loop to evaluate whether the series has converged quits +when the addition amount `dt` has dropped to zero. + +3. the formatting string to convert the decimal to hexidecimal is +calculated for the given precision. 4. pi_hex_digits(n) changed to have coefficient to the formula in an array (perhaps just a matter of preference). ''' + from __future__ import print_function, division import math -from sympy.core.compatibility import range +from sympy.core.compatibility import range, as_int -def _series(j, n): +def _series(j, n, prec=14): # Left sum from the bbp algorithm s = 0 - D = _dn(n) + D = _dn(n, prec) + D4 = 4 * D + k = 0 + d = 8 * k + j for k in range(n + 1): - r = 8*k + j - s += (pow(16, n - k, r) << 4 * D) // r + s += (pow(16, n - k, d) << D4) // d + d += 8 - # Right sum. should iterate to infinty, but now just iterates to the point where - # one iterations change is beyond the resolution of the data type used + # Right sum iterates to infinity for full precision, but we + # stop at the point where one iteration is beyond the precision + # specified. 
t = 0 - for k in range(n + 1, n + 15): - xp = int(16**(n - k) * 16**D) - t += xp // (8 * k + j) + k = n + 1 + e = 4*(D + n - k) + d = 8 * k + j + while True: + dt = (1 << e) // d + if not dt: + break + t += dt + # k += 1 + e -= 4 + d += 8 total = s + t return total -def pi_hex_digits(n): - """Returns a string containing 14 digits after the nth value of pi in hex - The decimal has been taken out of the number, so - n = 0[0] = 3 # First digit of pi in hex, 3 +def pi_hex_digits(n, prec=14): + """Returns a string containing ``prec`` (default 14) digits + starting at the nth digit of pi in hex. Counting of digits + starts at 0 and the decimal is not counted, so for n = 0 the + returned value starts with 3; n = 1 corresponds to the first + digit past the decimal point (which in hex is 2). Examples ======== @@ -83,9 +123,19 @@ def pi_hex_digits(n): >>> from sympy.ntheory.bbp_pi import pi_hex_digits >>> pi_hex_digits(0) '3243f6a8885a30' - >>> pi_hex_digits(10) - '5a308d313198a2' + >>> pi_hex_digits(0, 3) + '324' + + References + ========== + + .. 
[1] http://www.numberworld.org/digits/Pi/ """ + n, prec = as_int(n), as_int(prec) + if n < 0: + raise ValueError('n cannot be negative') + if prec == 0: + return '' # main of implementation arrays holding formulae coefficients n -= 1 @@ -93,18 +143,19 @@ def pi_hex_digits(n): j = [1, 4, 5, 6] #formulae - x = + (a[0]*_series(j[0], n) - - a[1]*_series(j[1], n) - - a[2]*_series(j[2], n) - - a[3]*_series(j[3], n)) & (16**(_dn(n)) - 1) + D = _dn(n, prec) + x = + (a[0]*_series(j[0], n, prec) + - a[1]*_series(j[1], n, prec) + - a[2]*_series(j[2], n, prec) + - a[3]*_series(j[3], n, prec)) & (16**D - 1) - s = ("%014x" % x) - #s is constrained between 0 and 14 - return s[:14] + s = ("%0" + "%ix" % prec) % (x // 16**(D - prec)) + return s -def _dn(n): +def _dn(n, prec): # controller for n dependence on precision - if (n < 1000): - return 16 - return int(math.log10(n//1000) + 18) + # n = starting digit index + # prec = the number of total digits to compute + n += 1 # because we subtract 1 for _series + return int(math.log(n + prec)/math.log(16) + prec + 3)
pi_hex_digits doesn't print leading 0 and errors ```python >>> from sympy.ntheory.bbp_pi import pi_hex_digits >>> pi_hex_digits(13) '8d313198a2e037' <-- should be '08d313198a2e03' ``` In addition the following starting pts lead to errors in the last of the 14 digits (and only the differeing digits are shown). There are more errors, but these are the first instances where a progressively larger trailing error is found: ``` pi_hex_digts(14) -> ...6 should be ...7 pi_hex_digts(381) -> ...af should be ...b0 pi_hex_digts(722) -> ...1ff should be ...200 ```
sympy/sympy
diff --git a/sympy/ntheory/tests/test_bbp_pi.py b/sympy/ntheory/tests/test_bbp_pi.py index d063079ed4..2b0918e48a 100644 --- a/sympy/ntheory/tests/test_bbp_pi.py +++ b/sympy/ntheory/tests/test_bbp_pi.py @@ -1,7 +1,133 @@ +from random import randint + from sympy.ntheory.bbp_pi import pi_hex_digits +from sympy.utilities.pytest import raises + + +# http://www.herongyang.com/Cryptography/Blowfish-First-8366-Hex-Digits-of-PI.html +# There are actually 8336 listed there; with the preppended 3 there are 8337 +# below +dig=''.join(''' +3243f6a8885a308d313198a2e03707344a4093822299f31d0082efa98ec4e6c89452821e638d013 +77be5466cf34e90c6cc0ac29b7c97c50dd3f84d5b5b54709179216d5d98979fb1bd1310ba698dfb5 +ac2ffd72dbd01adfb7b8e1afed6a267e96ba7c9045f12c7f9924a19947b3916cf70801f2e2858efc +16636920d871574e69a458fea3f4933d7e0d95748f728eb658718bcd5882154aee7b54a41dc25a59 +b59c30d5392af26013c5d1b023286085f0ca417918b8db38ef8e79dcb0603a180e6c9e0e8bb01e8a +3ed71577c1bd314b2778af2fda55605c60e65525f3aa55ab945748986263e8144055ca396a2aab10 +b6b4cc5c341141e8cea15486af7c72e993b3ee1411636fbc2a2ba9c55d741831f6ce5c3e169b8793 +1eafd6ba336c24cf5c7a325381289586773b8f48986b4bb9afc4bfe81b6628219361d809ccfb21a9 +91487cac605dec8032ef845d5de98575b1dc262302eb651b8823893e81d396acc50f6d6ff383f442 +392e0b4482a484200469c8f04a9e1f9b5e21c66842f6e96c9a670c9c61abd388f06a51a0d2d8542f +68960fa728ab5133a36eef0b6c137a3be4ba3bf0507efb2a98a1f1651d39af017666ca593e82430e +888cee8619456f9fb47d84a5c33b8b5ebee06f75d885c12073401a449f56c16aa64ed3aa62363f77 +061bfedf72429b023d37d0d724d00a1248db0fead349f1c09b075372c980991b7b25d479d8f6e8de +f7e3fe501ab6794c3b976ce0bd04c006bac1a94fb6409f60c45e5c9ec2196a246368fb6faf3e6c53 +b51339b2eb3b52ec6f6dfc511f9b30952ccc814544af5ebd09bee3d004de334afd660f2807192e4b +b3c0cba85745c8740fd20b5f39b9d3fbdb5579c0bd1a60320ad6a100c6402c7279679f25fefb1fa3 +cc8ea5e9f8db3222f83c7516dffd616b152f501ec8ad0552ab323db5fafd23876053317b483e00df 
+829e5c57bbca6f8ca01a87562edf1769dbd542a8f6287effc3ac6732c68c4f5573695b27b0bbca58 +c8e1ffa35db8f011a010fa3d98fd2183b84afcb56c2dd1d35b9a53e479b6f84565d28e49bc4bfb97 +90e1ddf2daa4cb7e3362fb1341cee4c6e8ef20cada36774c01d07e9efe2bf11fb495dbda4dae9091 +98eaad8e716b93d5a0d08ed1d0afc725e08e3c5b2f8e7594b78ff6e2fbf2122b648888b812900df0 +1c4fad5ea0688fc31cd1cff191b3a8c1ad2f2f2218be0e1777ea752dfe8b021fa1e5a0cc0fb56f74 +e818acf3d6ce89e299b4a84fe0fd13e0b77cc43b81d2ada8d9165fa2668095770593cc7314211a14 +77e6ad206577b5fa86c75442f5fb9d35cfebcdaf0c7b3e89a0d6411bd3ae1e7e4900250e2d2071b3 +5e226800bb57b8e0af2464369bf009b91e5563911d59dfa6aa78c14389d95a537f207d5ba202e5b9 +c5832603766295cfa911c819684e734a41b3472dca7b14a94a1b5100529a532915d60f573fbc9bc6 +e42b60a47681e6740008ba6fb5571be91ff296ec6b2a0dd915b6636521e7b9f9b6ff34052ec58556 +6453b02d5da99f8fa108ba47996e85076a4b7a70e9b5b32944db75092ec4192623ad6ea6b049a7df +7d9cee60b88fedb266ecaa8c71699a17ff5664526cc2b19ee1193602a575094c29a0591340e4183a +3e3f54989a5b429d656b8fe4d699f73fd6a1d29c07efe830f54d2d38e6f0255dc14cdd20868470eb +266382e9c6021ecc5e09686b3f3ebaefc93c9718146b6a70a1687f358452a0e286b79c5305aa5007 +373e07841c7fdeae5c8e7d44ec5716f2b8b03ada37f0500c0df01c1f040200b3ffae0cf51a3cb574 +b225837a58dc0921bdd19113f97ca92ff69432477322f547013ae5e58137c2dadcc8b576349af3dd +a7a94461460fd0030eecc8c73ea4751e41e238cd993bea0e2f3280bba1183eb3314e548b384f6db9 +086f420d03f60a04bf2cb8129024977c795679b072bcaf89afde9a771fd9930810b38bae12dccf3f +2e5512721f2e6b7124501adde69f84cd877a5847187408da17bc9f9abce94b7d8cec7aec3adb851d +fa63094366c464c3d2ef1c18473215d908dd433b3724c2ba1612a14d432a65c45150940002133ae4 +dd71dff89e10314e5581ac77d65f11199b043556f1d7a3c76b3c11183b5924a509f28fe6ed97f1fb +fa9ebabf2c1e153c6e86e34570eae96fb1860e5e0a5a3e2ab3771fe71c4e3d06fa2965dcb999e71d +0f803e89d65266c8252e4cc9789c10b36ac6150eba94e2ea78a5fc3c531e0a2df4f2f74ea7361d2b +3d1939260f19c279605223a708f71312b6ebadfe6eeac31f66e3bc4595a67bc883b17f37d1018cff 
+28c332ddefbe6c5aa56558218568ab9802eecea50fdb2f953b2aef7dad5b6e2f841521b628290761 +70ecdd4775619f151013cca830eb61bd960334fe1eaa0363cfb5735c904c70a239d59e9e0bcbaade +14eecc86bc60622ca79cab5cabb2f3846e648b1eaf19bdf0caa02369b9655abb5040685a323c2ab4 +b3319ee9d5c021b8f79b540b19875fa09995f7997e623d7da8f837889a97e32d7711ed935f166812 +810e358829c7e61fd696dedfa17858ba9957f584a51b2272639b83c3ff1ac24696cdb30aeb532e30 +548fd948e46dbc312858ebf2ef34c6ffeafe28ed61ee7c3c735d4a14d9e864b7e342105d14203e13 +e045eee2b6a3aaabeadb6c4f15facb4fd0c742f442ef6abbb5654f3b1d41cd2105d81e799e86854d +c7e44b476a3d816250cf62a1f25b8d2646fc8883a0c1c7b6a37f1524c369cb749247848a0b5692b2 +85095bbf00ad19489d1462b17423820e0058428d2a0c55f5ea1dadf43e233f70613372f0928d937e +41d65fecf16c223bdb7cde3759cbee74604085f2a7ce77326ea607808419f8509ee8efd85561d997 +35a969a7aac50c06c25a04abfc800bcadc9e447a2ec3453484fdd567050e1e9ec9db73dbd3105588 +cd675fda79e3674340c5c43465713e38d83d28f89ef16dff20153e21e78fb03d4ae6e39f2bdb83ad +f7e93d5a68948140f7f64c261c94692934411520f77602d4f7bcf46b2ed4a20068d40824713320f4 +6a43b7d4b7500061af1e39f62e9724454614214f74bf8b88404d95fc1d96b591af70f4ddd366a02f +45bfbc09ec03bd97857fac6dd031cb850496eb27b355fd3941da2547e6abca0a9a28507825530429 +f40a2c86dae9b66dfb68dc1462d7486900680ec0a427a18dee4f3ffea2e887ad8cb58ce0067af4d6 +b6aace1e7cd3375fecce78a399406b2a4220fe9e35d9f385b9ee39d7ab3b124e8b1dc9faf74b6d18 +5626a36631eae397b23a6efa74dd5b43326841e7f7ca7820fbfb0af54ed8feb397454056acba4895 +2755533a3a20838d87fe6ba9b7d096954b55a867bca1159a58cca9296399e1db33a62a4a563f3125 +f95ef47e1c9029317cfdf8e80204272f7080bb155c05282ce395c11548e4c66d2248c1133fc70f86 +dc07f9c9ee41041f0f404779a45d886e17325f51ebd59bc0d1f2bcc18f41113564257b7834602a9c +60dff8e8a31f636c1b0e12b4c202e1329eaf664fd1cad181156b2395e0333e92e13b240b62eebeb9 +2285b2a20ee6ba0d99de720c8c2da2f728d012784595b794fd647d0862e7ccf5f05449a36f877d48 +fac39dfd27f33e8d1e0a476341992eff743a6f6eabf4f8fd37a812dc60a1ebddf8991be14cdb6e6b 
+0dc67b55106d672c372765d43bdcd0e804f1290dc7cc00ffa3b5390f92690fed0b667b9ffbcedb7d +9ca091cf0bd9155ea3bb132f88515bad247b9479bf763bd6eb37392eb3cc1159798026e297f42e31 +2d6842ada7c66a2b3b12754ccc782ef11c6a124237b79251e706a1bbe64bfb63501a6b101811caed +fa3d25bdd8e2e1c3c9444216590a121386d90cec6ed5abea2a64af674eda86a85fbebfe98864e4c3 +fe9dbc8057f0f7c08660787bf86003604dd1fd8346f6381fb07745ae04d736fccc83426b33f01eab +71b08041873c005e5f77a057bebde8ae2455464299bf582e614e58f48ff2ddfda2f474ef388789bd +c25366f9c3c8b38e74b475f25546fcd9b97aeb26618b1ddf84846a0e79915f95e2466e598e20b457 +708cd55591c902de4cb90bace1bb8205d011a862487574a99eb77f19b6e0a9dc09662d09a1c43246 +33e85a1f0209f0be8c4a99a0251d6efe101ab93d1d0ba5a4dfa186f20f2868f169dcb7da83573906 +fea1e2ce9b4fcd7f5250115e01a70683faa002b5c40de6d0279af88c27773f8641c3604c0661a806 +b5f0177a28c0f586e0006058aa30dc7d6211e69ed72338ea6353c2dd94c2c21634bbcbee5690bcb6 +deebfc7da1ce591d766f05e4094b7c018839720a3d7c927c2486e3725f724d9db91ac15bb4d39eb8 +fced54557808fca5b5d83d7cd34dad0fc41e50ef5eb161e6f8a28514d96c51133c6fd5c7e756e14e +c4362abfceddc6c837d79a323492638212670efa8e406000e03a39ce37d3faf5cfabc277375ac52d +1b5cb0679e4fa33742d382274099bc9bbed5118e9dbf0f7315d62d1c7ec700c47bb78c1b6b21a190 +45b26eb1be6a366eb45748ab2fbc946e79c6a376d26549c2c8530ff8ee468dde7dd5730a1d4cd04d +c62939bbdba9ba4650ac9526e8be5ee304a1fad5f06a2d519a63ef8ce29a86ee22c089c2b843242e +f6a51e03aa9cf2d0a483c061ba9be96a4d8fe51550ba645bd62826a2f9a73a3ae14ba99586ef5562 +e9c72fefd3f752f7da3f046f6977fa0a5980e4a91587b086019b09e6ad3b3ee593e990fd5a9e34d7 +972cf0b7d9022b8b5196d5ac3a017da67dd1cf3ed67c7d2d281f9f25cfadf2b89b5ad6b4725a88f5 +4ce029ac71e019a5e647b0acfded93fa9be8d3c48d283b57ccf8d5662979132e28785f0191ed7560 +55f7960e44e3d35e8c15056dd488f46dba03a161250564f0bdc3eb9e153c9057a297271aeca93a07 +2a1b3f6d9b1e6321f5f59c66fb26dcf3197533d928b155fdf5035634828aba3cbb28517711c20ad9 +f8abcc5167ccad925f4de817513830dc8e379d58629320f991ea7a90c2fb3e7bce5121ce64774fbe 
+32a8b6e37ec3293d4648de53696413e680a2ae0810dd6db22469852dfd09072166b39a460a6445c0 +dd586cdecf1c20c8ae5bbef7dd1b588d40ccd2017f6bb4e3bbdda26a7e3a59ff453e350a44bcb4cd +d572eacea8fa6484bb8d6612aebf3c6f47d29be463542f5d9eaec2771bf64e6370740e0d8de75b13 +57f8721671af537d5d4040cb084eb4e2cc34d2466a0115af84e1b0042895983a1d06b89fb4ce6ea0 +486f3f3b823520ab82011a1d4b277227f8611560b1e7933fdcbb3a792b344525bda08839e151ce79 +4b2f32c9b7a01fbac9e01cc87ebcc7d1f6cf0111c3a1e8aac71a908749d44fbd9ad0dadecbd50ada +380339c32ac69136678df9317ce0b12b4ff79e59b743f5bb3af2d519ff27d9459cbf97222c15e6fc +2a0f91fc719b941525fae59361ceb69cebc2a8645912baa8d1b6c1075ee3056a0c10d25065cb03a4 +42e0ec6e0e1698db3b4c98a0be3278e9649f1f9532e0d392dfd3a0342b8971f21e1b0a74414ba334 +8cc5be7120c37632d8df359f8d9b992f2ee60b6f470fe3f11de54cda541edad891ce6279cfcd3e7e +6f1618b166fd2c1d05848fd2c5f6fb2299f523f357a632762393a8353156cccd02acf081625a75eb +b56e16369788d273ccde96629281b949d04c50901b71c65614e6c6c7bd327a140a45e1d006c3f27b +9ac9aa53fd62a80f00bb25bfe235bdd2f671126905b2040222b6cbcf7ccd769c2b53113ec01640e3 +d338abbd602547adf0ba38209cf746ce7677afa1c52075606085cbfe4e8ae88dd87aaaf9b04cf9aa +7e1948c25c02fb8a8c01c36ae4d6ebe1f990d4f869a65cdea03f09252dc208e69fb74e6132ce77e2 +5b578fdfe33ac372e6'''.split()) def test_hex_pi_nth_digits(): assert pi_hex_digits(0) == '3243f6a8885a30' - assert pi_hex_digits(1) == '243f6a8885a308' + assert pi_hex_digits(1) == '243f6a8885a308' assert pi_hex_digits(10000) == '68ac8fcfb8016c' + assert pi_hex_digits(13) == '08d313198a2e03' + assert pi_hex_digits(0, 3) == '324' + assert pi_hex_digits(0, 0) == '' + raises(ValueError, lambda: pi_hex_digits(-1)) + raises(ValueError, lambda: pi_hex_digits(3.14)) + + # this will pick a random segment to compute every time + # it is run. If it ever fails, there is an error in the + # computation. + n = randint(0, len(dig)) + prec = randint(0, len(dig) - n) + assert pi_hex_digits(n, prec) == dig[n: n + prec]
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 -e git+https://github.com/sympy/sympy.git@8b01598119b063766667cfdb643452680a5356cd#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/ntheory/tests/test_bbp_pi.py::test_hex_pi_nth_digits" ]
[]
[]
[]
BSD
384
sympy__sympy-10416
8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf
2016-01-18 08:29:12
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/combinatorics/perm_groups.py b/sympy/combinatorics/perm_groups.py index b1565fd52f..720abd62d3 100644 --- a/sympy/combinatorics/perm_groups.py +++ b/sympy/combinatorics/perm_groups.py @@ -123,10 +123,10 @@ def __new__(cls, *args, **kwargs): """The default constructor. Accepts Cycle and Permutation forms. Removes duplicates unless ``dups`` keyword is False. """ + args = list(args[0] if is_sequence(args[0]) else args) if not args: - args = [Permutation()] - else: - args = list(args[0] if is_sequence(args[0]) else args) + raise ValueError('must supply one or more permutations ' + 'to define the group') if any(isinstance(a, Cycle) for a in args): args = [Permutation(a) for a in args] if has_variety(a.size for a in args): @@ -1568,7 +1568,7 @@ def is_nilpotent(self): else: return self._is_nilpotent - def is_normal(self, gr, strict=True): + def is_normal(self, gr): """Test if G=self is a normal subgroup of gr. G is normal in gr if @@ -1590,20 +1590,12 @@ def is_normal(self, gr, strict=True): True """ - d_self = self.degree - d_gr = gr.degree - new_self = self.copy() - if not strict and d_self != d_gr: - if d_self < d_gr: - new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)]) - else: - gr = PermGroup(gr.generators + [Permutation(d_self - 1)]) - gens2 = [p._array_form for p in new_self.generators] + gens2 = [p._array_form for p in self.generators] gens1 = [p._array_form for p in gr.generators] for g1 in gens1: for g2 in gens2: p = _af_rmuln(g1, g2, _af_invert(g1)) - if not new_self.coset_factor(p, True): + if not self.coset_factor(p, True): return False return True diff --git a/sympy/combinatorics/permutations.py b/sympy/combinatorics/permutations.py index cf68727df8..7e7a052247 100644 --- a/sympy/combinatorics/permutations.py +++ b/sympy/combinatorics/permutations.py @@ -235,7 +235,7 @@ class Cycle(dict): Wrapper around dict which provides the functionality of a disjoint cycle. 
A cycle shows the rule to use to move subsets of elements to obtain - a permutation. The Cycle class is more flexible than Permutation in + a permutation. The Cycle class is more flexible that Permutation in that 1) all elements need not be present in order to investigate how multiple cycles act in sequence and 2) it can contain singletons: diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py index 034c977ef6..555d318930 100644 --- a/sympy/printing/pretty/pretty.py +++ b/sympy/printing/pretty/pretty.py @@ -1444,7 +1444,7 @@ def _print_ProductSet(self, p): from sympy import Pow return self._print(Pow(p.sets[0], len(p.sets), evaluate=False)) else: - prod_char = u('\xd7') + prod_char = u("\N{MULTIPLICATION SIGN}") if self._use_unicode else 'x' return self._print_seq(p.sets, None, None, ' %s ' % prod_char, parenthesize=lambda set: set.is_Union or set.is_Intersection or set.is_ProductSet)
ascii pprint of ProductSet uses non-ascii multiplication symbol ```` >>> srepr(A) 'ProductSet(FiniteSet(Rational(13, 111), Integer(2), Integer(3)), FiniteSet(exp(Integer(10)), Mul(Rational(1, 2), pi)))' >>> pprint(A, use_unicode=True) ⎧ 13 ⎫ ⎧π 10⎫ ⎨───, 2, 3⎬ × ⎨─, ℯ ⎬ ⎩111 ⎭ ⎩2 ⎭ >>> pprint(A, use_unicode=False) 13 pi 10 {---, 2, 3} × {--, e } 111 2 ```` Last one should be `x` not `\xd7`. See `printing/pretty/pretty.py`. Possible fix might be: `prod_char = U('MULTIPLICATION SIGN') if self._use_unicode else 'x'` Also needs a test---can be simplier than mine.
sympy/sympy
diff --git a/sympy/combinatorics/tests/test_perm_groups.py b/sympy/combinatorics/tests/test_perm_groups.py index 3f45083ba7..d8f7118cda 100644 --- a/sympy/combinatorics/tests/test_perm_groups.py +++ b/sympy/combinatorics/tests/test_perm_groups.py @@ -60,7 +60,6 @@ def test_order(): b = Permutation([2, 1, 3, 4, 5, 6, 7, 8, 9, 0]) g = PermutationGroup([a, b]) assert g.order() == 1814400 - assert PermutationGroup().order() == 1 def test_equality(): @@ -274,10 +273,6 @@ def test_is_normal(): assert G1.is_subgroup(G6) assert not G1.is_subgroup(G4) assert G2.is_subgroup(G4) - s4 = PermutationGroup(Permutation(0, 1, 2, 3), Permutation(3)(0, 1)) - s6 = PermutationGroup(Permutation(0, 1, 2, 3, 5), Permutation(5)(0, 1)) - assert s6.is_normal(s4, strict=False) - assert not s4.is_normal(s6, strict=False) def test_eq(): @@ -722,7 +717,3 @@ def test_elements(): def test_is_group(): assert PermutationGroup(Permutation(1,2), Permutation(2,4)).is_group == True assert SymmetricGroup(4).is_group == True - - -def test_PermutationGroup(): - assert PermutationGroup() == PermutationGroup(Permutation()) diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py index 0724493609..ea40affa5c 100644 --- a/sympy/printing/pretty/tests/test_pretty.py +++ b/sympy/printing/pretty/tests/test_pretty.py @@ -3147,6 +3147,13 @@ def test_ProductSet_paranthesis(): a, b, c = Interval(2, 3), Interval(4, 7), Interval(1, 9) assert upretty(Union(a*b, b*FiniteSet(1, 2))) == ucode_str +def test_ProductSet_prod_char_issue_10413(): + ascii_str = '[2, 3] x [4, 7]' + ucode_str = u('[2, 3] × [4, 7]') + + a, b = Interval(2, 3), Interval(4, 7) + assert pretty(a*b) == ascii_str + assert upretty(a*b) == ucode_str def test_pretty_sequences(): s1 = SeqFormula(a**2, (0, oo))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 3 }
0.7
{ "env_vars": null, "env_yml_path": [], "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "pytest" ], "pre_install": [], "python": "3.6", "reqs_path": [], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 mpmath==1.3.0 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 -e git+https://github.com/sympy/sympy.git@8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf#egg=sympy tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - mpmath==1.3.0 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/sympy
[ "sympy/printing/pretty/tests/test_pretty.py::test_ProductSet_prod_char_issue_10413" ]
[]
[ "sympy/combinatorics/tests/test_perm_groups.py::test_has", "sympy/combinatorics/tests/test_perm_groups.py::test_generate", "sympy/combinatorics/tests/test_perm_groups.py::test_order", "sympy/combinatorics/tests/test_perm_groups.py::test_equality", "sympy/combinatorics/tests/test_perm_groups.py::test_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_center", "sympy/combinatorics/tests/test_perm_groups.py::test_centralizer", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_rank", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_factor", "sympy/combinatorics/tests/test_perm_groups.py::test_orbits", "sympy/combinatorics/tests/test_perm_groups.py::test_is_normal", "sympy/combinatorics/tests/test_perm_groups.py::test_eq", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_subgroup", "sympy/combinatorics/tests/test_perm_groups.py::test_is_solvable", "sympy/combinatorics/tests/test_perm_groups.py::test_rubik1", "sympy/combinatorics/tests/test_perm_groups.py::test_direct_product", "sympy/combinatorics/tests/test_perm_groups.py::test_orbit_rep", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_vector", "sympy/combinatorics/tests/test_perm_groups.py::test_random_pr", "sympy/combinatorics/tests/test_perm_groups.py::test_is_alt_sym", "sympy/combinatorics/tests/test_perm_groups.py::test_minimal_block", "sympy/combinatorics/tests/test_perm_groups.py::test_max_div", "sympy/combinatorics/tests/test_perm_groups.py::test_is_primitive", "sympy/combinatorics/tests/test_perm_groups.py::test_random_stab", "sympy/combinatorics/tests/test_perm_groups.py::test_transitivity_degree", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_random", "sympy/combinatorics/tests/test_perm_groups.py::test_baseswap", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_incremental", "sympy/combinatorics/tests/test_perm_groups.py::test_subgroup_search", 
"sympy/combinatorics/tests/test_perm_groups.py::test_normal_closure", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_series", "sympy/combinatorics/tests/test_perm_groups.py::test_lower_central_series", "sympy/combinatorics/tests/test_perm_groups.py::test_commutator", "sympy/combinatorics/tests/test_perm_groups.py::test_is_nilpotent", "sympy/combinatorics/tests/test_perm_groups.py::test_is_trivial", "sympy/combinatorics/tests/test_perm_groups.py::test_pointwise_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_make_perm", "sympy/combinatorics/tests/test_perm_groups.py::test_elements", "sympy/combinatorics/tests/test_perm_groups.py::test_is_group", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ascii_str", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_unicode_str", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_greek", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_multiindex", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_sub_super", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_subs_missing_in_24", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_modifiers", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Cycle", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_basic", "sympy/printing/pretty/tests/test_pretty.py::test_negative_fractions", "sympy/printing/pretty/tests/test_pretty.py::test_issue_5524", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ordering", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_relational", "sympy/printing/pretty/tests/test_pretty.py::test_Assignment", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7117", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_rational", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_functions", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt_char_knob", 
"sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt_longsymbol_no_sqrt_char", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_KroneckerDelta", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_product", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_lambda", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_order", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_derivatives", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_integrals", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_matrix", "sympy/printing/pretty/tests/test_pretty.py::test_Adjoint", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_piecewise", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_seq", "sympy/printing/pretty/tests/test_pretty.py::test_any_object_in_sequence", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sets", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ConditionSet", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ComplexRegion", "sympy/printing/pretty/tests/test_pretty.py::test_ProductSet_paranthesis", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sequences", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_FourierSeries", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_FormalPowerSeries", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_limits", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ComplexRootOf", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_RootSum", "sympy/printing/pretty/tests/test_pretty.py::test_GroebnerBasis", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Boolean", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Domain", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_prec", "sympy/printing/pretty/tests/test_pretty.py::test_pprint", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_class", 
"sympy/printing/pretty/tests/test_pretty.py::test_pretty_no_wrap_line", "sympy/printing/pretty/tests/test_pretty.py::test_settings", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sum", "sympy/printing/pretty/tests/test_pretty.py::test_units", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Subs", "sympy/printing/pretty/tests/test_pretty.py::test_gammas", "sympy/printing/pretty/tests/test_pretty.py::test_hyper", "sympy/printing/pretty/tests/test_pretty.py::test_meijerg", "sympy/printing/pretty/tests/test_pretty.py::test_noncommutative", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_special_functions", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_geometry", "sympy/printing/pretty/tests/test_pretty.py::test_expint", "sympy/printing/pretty/tests/test_pretty.py::test_elliptic_functions", "sympy/printing/pretty/tests/test_pretty.py::test_RandomDomain", "sympy/printing/pretty/tests/test_pretty.py::test_PrettyPoly", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6285", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6359", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6739", "sympy/printing/pretty/tests/test_pretty.py::test_complicated_symbol_unchanged", "sympy/printing/pretty/tests/test_pretty.py::test_categories", "sympy/printing/pretty/tests/test_pretty.py::test_PrettyModules", "sympy/printing/pretty/tests/test_pretty.py::test_QuotientRing", "sympy/printing/pretty/tests/test_pretty.py::test_Homomorphism", "sympy/printing/pretty/tests/test_pretty.py::test_Tr", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Add", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7179", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7180", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Complement", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_SymmetricDifference", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Contains", 
"sympy/printing/pretty/tests/test_pretty.py::test_issue_8292", "sympy/printing/pretty/tests/test_pretty.py::test_issue_4335", "sympy/printing/pretty/tests/test_pretty.py::test_issue_8344", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6324", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7927", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6134", "sympy/printing/pretty/tests/test_pretty.py::test_issue_9877" ]
[]
BSD
385
sympy__sympy-10417
8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf
2016-01-18 11:00:52
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/combinatorics/perm_groups.py b/sympy/combinatorics/perm_groups.py index b1565fd52f..720abd62d3 100644 --- a/sympy/combinatorics/perm_groups.py +++ b/sympy/combinatorics/perm_groups.py @@ -123,10 +123,10 @@ def __new__(cls, *args, **kwargs): """The default constructor. Accepts Cycle and Permutation forms. Removes duplicates unless ``dups`` keyword is False. """ + args = list(args[0] if is_sequence(args[0]) else args) if not args: - args = [Permutation()] - else: - args = list(args[0] if is_sequence(args[0]) else args) + raise ValueError('must supply one or more permutations ' + 'to define the group') if any(isinstance(a, Cycle) for a in args): args = [Permutation(a) for a in args] if has_variety(a.size for a in args): @@ -1568,7 +1568,7 @@ def is_nilpotent(self): else: return self._is_nilpotent - def is_normal(self, gr, strict=True): + def is_normal(self, gr): """Test if G=self is a normal subgroup of gr. G is normal in gr if @@ -1590,20 +1590,12 @@ def is_normal(self, gr, strict=True): True """ - d_self = self.degree - d_gr = gr.degree - new_self = self.copy() - if not strict and d_self != d_gr: - if d_self < d_gr: - new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)]) - else: - gr = PermGroup(gr.generators + [Permutation(d_self - 1)]) - gens2 = [p._array_form for p in new_self.generators] + gens2 = [p._array_form for p in self.generators] gens1 = [p._array_form for p in gr.generators] for g1 in gens1: for g2 in gens2: p = _af_rmuln(g1, g2, _af_invert(g1)) - if not new_self.coset_factor(p, True): + if not self.coset_factor(p, True): return False return True diff --git a/sympy/combinatorics/permutations.py b/sympy/combinatorics/permutations.py index cf68727df8..7e7a052247 100644 --- a/sympy/combinatorics/permutations.py +++ b/sympy/combinatorics/permutations.py @@ -235,7 +235,7 @@ class Cycle(dict): Wrapper around dict which provides the functionality of a disjoint cycle. 
A cycle shows the rule to use to move subsets of elements to obtain - a permutation. The Cycle class is more flexible than Permutation in + a permutation. The Cycle class is more flexible that Permutation in that 1) all elements need not be present in order to investigate how multiple cycles act in sequence and 2) it can contain singletons: diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py index 034c977ef6..0f6bc11bd0 100644 --- a/sympy/printing/pretty/pretty.py +++ b/sympy/printing/pretty/pretty.py @@ -1496,7 +1496,7 @@ def _print_AccumuBounds(self, i): def _print_Intersection(self, u): - delimiter = ' %s ' % pretty_atom('Intersection') + delimiter = ' %s ' % pretty_atom('Intersection', 'n') return self._print_seq(u.args, None, None, delimiter, parenthesize=lambda set: set.is_ProductSet or @@ -1504,7 +1504,7 @@ def _print_Intersection(self, u): def _print_Union(self, u): - union_delimiter = ' %s ' % pretty_atom('Union') + union_delimiter = ' %s ' % pretty_atom('Union', 'U') return self._print_seq(u.args, None, None, union_delimiter, parenthesize=lambda set: set.is_ProductSet or
`pprint(Union, use_unicode=False)` raises error (but `str(Union)` works) See `printing/pretty/pretty.py`. 1. Possible fix: `union_delimiter = ' %s ' % pretty_atom('Union', 'U')` 2. Similarly, I wonder if lower-case `n` might be an appropriate ascii character for `Intersection`? If not, at least throw `NotImplemented` like in `SymmetricDifference`.
sympy/sympy
diff --git a/sympy/combinatorics/tests/test_perm_groups.py b/sympy/combinatorics/tests/test_perm_groups.py index 3f45083ba7..d8f7118cda 100644 --- a/sympy/combinatorics/tests/test_perm_groups.py +++ b/sympy/combinatorics/tests/test_perm_groups.py @@ -60,7 +60,6 @@ def test_order(): b = Permutation([2, 1, 3, 4, 5, 6, 7, 8, 9, 0]) g = PermutationGroup([a, b]) assert g.order() == 1814400 - assert PermutationGroup().order() == 1 def test_equality(): @@ -274,10 +273,6 @@ def test_is_normal(): assert G1.is_subgroup(G6) assert not G1.is_subgroup(G4) assert G2.is_subgroup(G4) - s4 = PermutationGroup(Permutation(0, 1, 2, 3), Permutation(3)(0, 1)) - s6 = PermutationGroup(Permutation(0, 1, 2, 3, 5), Permutation(5)(0, 1)) - assert s6.is_normal(s4, strict=False) - assert not s4.is_normal(s6, strict=False) def test_eq(): @@ -722,7 +717,3 @@ def test_elements(): def test_is_group(): assert PermutationGroup(Permutation(1,2), Permutation(2,4)).is_group == True assert SymmetricGroup(4).is_group == True - - -def test_PermutationGroup(): - assert PermutationGroup() == PermutationGroup(Permutation()) diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py index 0724493609..ac6d0120a2 100644 --- a/sympy/printing/pretty/tests/test_pretty.py +++ b/sympy/printing/pretty/tests/test_pretty.py @@ -3139,6 +3139,20 @@ def test_pretty_ComplexRegion(): ucode_str = u('{r⋅(ⅈ⋅sin(θ) + cos(θ)) | r, θ ∊ [0, 1] × [0, 2⋅π)}') assert upretty(ComplexRegion(Interval(0, 1)*Interval(0, 2*pi), polar=True)) == ucode_str +def test_pretty_Union_issue_10414(): + a, b = Interval(2, 3), Interval(4, 7) + ucode_str = u('[2, 3] ∪ [4, 7]') + ascii_str = '[2, 3] U [4, 7]' + assert upretty(Union(a, b)) == ucode_str + assert pretty(Union(a, b)) == ascii_str + +def test_pretty_Intersection_issue_10414(): + x, y, z, w = symbols('x, y, z, w') + a, b = Interval(x, y), Interval(z, w) + ucode_str = u('[x, y] ∩ [z, w]') + ascii_str = '[x, y] n [z, w]' + assert upretty(Intersection(a, 
b)) == ucode_str + assert pretty(Intersection(a, b)) == ascii_str def test_ProductSet_paranthesis(): from sympy import Interval, Union, FiniteSet
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 3 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": null, "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 -e git+https://github.com/sympy/sympy.git@8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Union_issue_10414", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Intersection_issue_10414" ]
[]
[ "sympy/combinatorics/tests/test_perm_groups.py::test_has", "sympy/combinatorics/tests/test_perm_groups.py::test_generate", "sympy/combinatorics/tests/test_perm_groups.py::test_order", "sympy/combinatorics/tests/test_perm_groups.py::test_equality", "sympy/combinatorics/tests/test_perm_groups.py::test_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_center", "sympy/combinatorics/tests/test_perm_groups.py::test_centralizer", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_rank", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_factor", "sympy/combinatorics/tests/test_perm_groups.py::test_orbits", "sympy/combinatorics/tests/test_perm_groups.py::test_is_normal", "sympy/combinatorics/tests/test_perm_groups.py::test_eq", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_subgroup", "sympy/combinatorics/tests/test_perm_groups.py::test_is_solvable", "sympy/combinatorics/tests/test_perm_groups.py::test_rubik1", "sympy/combinatorics/tests/test_perm_groups.py::test_direct_product", "sympy/combinatorics/tests/test_perm_groups.py::test_orbit_rep", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_vector", "sympy/combinatorics/tests/test_perm_groups.py::test_random_pr", "sympy/combinatorics/tests/test_perm_groups.py::test_is_alt_sym", "sympy/combinatorics/tests/test_perm_groups.py::test_minimal_block", "sympy/combinatorics/tests/test_perm_groups.py::test_max_div", "sympy/combinatorics/tests/test_perm_groups.py::test_is_primitive", "sympy/combinatorics/tests/test_perm_groups.py::test_random_stab", "sympy/combinatorics/tests/test_perm_groups.py::test_transitivity_degree", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_random", "sympy/combinatorics/tests/test_perm_groups.py::test_baseswap", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_incremental", "sympy/combinatorics/tests/test_perm_groups.py::test_subgroup_search", 
"sympy/combinatorics/tests/test_perm_groups.py::test_normal_closure", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_series", "sympy/combinatorics/tests/test_perm_groups.py::test_lower_central_series", "sympy/combinatorics/tests/test_perm_groups.py::test_commutator", "sympy/combinatorics/tests/test_perm_groups.py::test_is_nilpotent", "sympy/combinatorics/tests/test_perm_groups.py::test_is_trivial", "sympy/combinatorics/tests/test_perm_groups.py::test_pointwise_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_make_perm", "sympy/combinatorics/tests/test_perm_groups.py::test_elements", "sympy/combinatorics/tests/test_perm_groups.py::test_is_group", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ascii_str", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_unicode_str", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_greek", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_multiindex", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_sub_super", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_subs_missing_in_24", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_modifiers", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Cycle", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_basic", "sympy/printing/pretty/tests/test_pretty.py::test_negative_fractions", "sympy/printing/pretty/tests/test_pretty.py::test_issue_5524", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ordering", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_relational", "sympy/printing/pretty/tests/test_pretty.py::test_Assignment", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7117", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_rational", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_functions", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt_char_knob", 
"sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt_longsymbol_no_sqrt_char", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_KroneckerDelta", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_product", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_lambda", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_order", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_derivatives", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_integrals", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_matrix", "sympy/printing/pretty/tests/test_pretty.py::test_Adjoint", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_piecewise", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_seq", "sympy/printing/pretty/tests/test_pretty.py::test_any_object_in_sequence", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sets", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ConditionSet", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ComplexRegion", "sympy/printing/pretty/tests/test_pretty.py::test_ProductSet_paranthesis", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sequences", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_FourierSeries", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_FormalPowerSeries", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_limits", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ComplexRootOf", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_RootSum", "sympy/printing/pretty/tests/test_pretty.py::test_GroebnerBasis", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Boolean", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Domain", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_prec", "sympy/printing/pretty/tests/test_pretty.py::test_pprint", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_class", 
"sympy/printing/pretty/tests/test_pretty.py::test_pretty_no_wrap_line", "sympy/printing/pretty/tests/test_pretty.py::test_settings", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sum", "sympy/printing/pretty/tests/test_pretty.py::test_units", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Subs", "sympy/printing/pretty/tests/test_pretty.py::test_gammas", "sympy/printing/pretty/tests/test_pretty.py::test_hyper", "sympy/printing/pretty/tests/test_pretty.py::test_meijerg", "sympy/printing/pretty/tests/test_pretty.py::test_noncommutative", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_special_functions", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_geometry", "sympy/printing/pretty/tests/test_pretty.py::test_expint", "sympy/printing/pretty/tests/test_pretty.py::test_elliptic_functions", "sympy/printing/pretty/tests/test_pretty.py::test_RandomDomain", "sympy/printing/pretty/tests/test_pretty.py::test_PrettyPoly", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6285", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6359", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6739", "sympy/printing/pretty/tests/test_pretty.py::test_complicated_symbol_unchanged", "sympy/printing/pretty/tests/test_pretty.py::test_categories", "sympy/printing/pretty/tests/test_pretty.py::test_PrettyModules", "sympy/printing/pretty/tests/test_pretty.py::test_QuotientRing", "sympy/printing/pretty/tests/test_pretty.py::test_Homomorphism", "sympy/printing/pretty/tests/test_pretty.py::test_Tr", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Add", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7179", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7180", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Complement", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_SymmetricDifference", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Contains", 
"sympy/printing/pretty/tests/test_pretty.py::test_issue_8292", "sympy/printing/pretty/tests/test_pretty.py::test_issue_4335", "sympy/printing/pretty/tests/test_pretty.py::test_issue_8344", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6324", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7927", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6134", "sympy/printing/pretty/tests/test_pretty.py::test_issue_9877" ]
[]
BSD
386
sympy__sympy-10420
8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf
2016-01-18 17:13:25
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/sets/fancysets.py b/sympy/sets/fancysets.py index dc8b409185..c229519ee6 100644 --- a/sympy/sets/fancysets.py +++ b/sympy/sets/fancysets.py @@ -903,7 +903,7 @@ def _union(self, other): elif self.polar and other.polar: return ComplexRegion(Union(self.sets, other.sets), polar=True) - if other.is_subset(S.Reals): + if self == S.Complexes: return self return None
Union of ComplexRegion and Interval returns ComplexRegion ``` >>> c1 = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) >>> c1.union(Interval(2, 4)) == c1 True ``` I think this should return a `Union` of `ComplexRegion` and the `Interval`
sympy/sympy
diff --git a/sympy/sets/tests/test_fancysets.py b/sympy/sets/tests/test_fancysets.py index fe36f2b566..d61929d4a1 100644 --- a/sympy/sets/tests/test_fancysets.py +++ b/sympy/sets/tests/test_fancysets.py @@ -347,7 +347,7 @@ def test_ComplexRegion_intersect(): # unevaluated object C1 = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) C2 = ComplexRegion(Interval(-1, 1)*Interval(-1, 1)) - assert C1.intersect(C2) == Intersection(C1, C2) + assert C1.intersect(C2) == Intersection(C1, C2, evaluate=False) def test_ComplexRegion_union(): @@ -376,8 +376,8 @@ def test_ComplexRegion_union(): assert c5.union(c6) == ComplexRegion(p3) assert c7.union(c8) == ComplexRegion(p4) - assert c1.union(Interval(2, 4)) == Union(c1, Interval(2, 4)) - assert c5.union(Interval(2, 4)) == Union(c5, Interval(2, 4)) + assert c1.union(Interval(2, 4)) == Union(c1, Interval(2, 4), evaluate=False) + assert c5.union(Interval(2, 4)) == Union(c5, Interval(2, 4), evaluate=False) def test_ComplexRegion_measure():
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y python3-pip" ], "python": "3.6", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work certifi==2021.5.30 importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work mpmath==1.3.0 packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work pytest==6.2.4 -e git+https://github.com/sympy/sympy.git@8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf#egg=sympy toml @ file:///tmp/build/80754af9/toml_1616166611790/work typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=21.4.0=pyhd3eb1b0_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - importlib-metadata=4.8.1=py36h06a4308_0 - importlib_metadata=4.8.1=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - more-itertools=8.12.0=pyhd3eb1b0_0 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=21.3=pyhd3eb1b0_0 - pip=21.2.2=py36h06a4308_0 - pluggy=0.13.1=py36h06a4308_0 - py=1.11.0=pyhd3eb1b0_0 - pyparsing=3.0.4=pyhd3eb1b0_0 - pytest=6.2.4=py36h06a4308_2 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - toml=0.10.2=pyhd3eb1b0_0 - typing_extensions=4.1.1=pyh06a4308_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zipp=3.6.0=pyhd3eb1b0_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_union" ]
[]
[ "sympy/sets/tests/test_fancysets.py::test_naturals", "sympy/sets/tests/test_fancysets.py::test_naturals0", "sympy/sets/tests/test_fancysets.py::test_integers", "sympy/sets/tests/test_fancysets.py::test_ImageSet", "sympy/sets/tests/test_fancysets.py::test_image_is_ImageSet", "sympy/sets/tests/test_fancysets.py::test_ImageSet_iterator_not_injetive", "sympy/sets/tests/test_fancysets.py::test_Range", "sympy/sets/tests/test_fancysets.py::test_range_interval_intersection", "sympy/sets/tests/test_fancysets.py::test_fun", "sympy/sets/tests/test_fancysets.py::test_Reals", "sympy/sets/tests/test_fancysets.py::test_Complex", "sympy/sets/tests/test_fancysets.py::test_intersections", "sympy/sets/tests/test_fancysets.py::test_infinitely_indexed_set_1", "sympy/sets/tests/test_fancysets.py::test_infinitely_indexed_set_2", "sympy/sets/tests/test_fancysets.py::test_imageset_intersect_real", "sympy/sets/tests/test_fancysets.py::test_ImageSet_simplification", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_contains", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_intersect", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_measure", "sympy/sets/tests/test_fancysets.py::test_normalize_theta_set", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_FiniteSet", "sympy/sets/tests/test_fancysets.py::test_union_RealSubSet" ]
[]
BSD
387
sympy__sympy-10423
8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf
2016-01-19 02:38:09
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/combinatorics/perm_groups.py b/sympy/combinatorics/perm_groups.py index b1565fd52f..720abd62d3 100644 --- a/sympy/combinatorics/perm_groups.py +++ b/sympy/combinatorics/perm_groups.py @@ -123,10 +123,10 @@ def __new__(cls, *args, **kwargs): """The default constructor. Accepts Cycle and Permutation forms. Removes duplicates unless ``dups`` keyword is False. """ + args = list(args[0] if is_sequence(args[0]) else args) if not args: - args = [Permutation()] - else: - args = list(args[0] if is_sequence(args[0]) else args) + raise ValueError('must supply one or more permutations ' + 'to define the group') if any(isinstance(a, Cycle) for a in args): args = [Permutation(a) for a in args] if has_variety(a.size for a in args): @@ -1568,7 +1568,7 @@ def is_nilpotent(self): else: return self._is_nilpotent - def is_normal(self, gr, strict=True): + def is_normal(self, gr): """Test if G=self is a normal subgroup of gr. G is normal in gr if @@ -1590,20 +1590,12 @@ def is_normal(self, gr, strict=True): True """ - d_self = self.degree - d_gr = gr.degree - new_self = self.copy() - if not strict and d_self != d_gr: - if d_self < d_gr: - new_self = PermGroup(new_self.generators + [Permutation(d_gr - 1)]) - else: - gr = PermGroup(gr.generators + [Permutation(d_self - 1)]) - gens2 = [p._array_form for p in new_self.generators] + gens2 = [p._array_form for p in self.generators] gens1 = [p._array_form for p in gr.generators] for g1 in gens1: for g2 in gens2: p = _af_rmuln(g1, g2, _af_invert(g1)) - if not new_self.coset_factor(p, True): + if not self.coset_factor(p, True): return False return True diff --git a/sympy/combinatorics/permutations.py b/sympy/combinatorics/permutations.py index cf68727df8..7e7a052247 100644 --- a/sympy/combinatorics/permutations.py +++ b/sympy/combinatorics/permutations.py @@ -235,7 +235,7 @@ class Cycle(dict): Wrapper around dict which provides the functionality of a disjoint cycle. 
A cycle shows the rule to use to move subsets of elements to obtain - a permutation. The Cycle class is more flexible than Permutation in + a permutation. The Cycle class is more flexible that Permutation in that 1) all elements need not be present in order to investigate how multiple cycles act in sequence and 2) it can contain singletons: diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py index 034c977ef6..32b1592010 100644 --- a/sympy/printing/pretty/pretty.py +++ b/sympy/printing/pretty/pretty.py @@ -664,6 +664,13 @@ def _print_MatrixBase(self, e): _print_ImmutableMatrix = _print_MatrixBase _print_Matrix = _print_MatrixBase + def _print_Trace(self, e): + D = self._print(e.arg) + D = prettyForm(*D.parens('(',')')) + D.baseline = D.height()//2 + D = prettyForm(*D.left('\n'*(0) + 'tr')) + return D + def _print_MatrixElement(self, expr): from sympy.matrices import MatrixSymbol
pretty printing: `Trace` could be improved (and LaTeX) ```` In [69]: X = ImmutableMatrix([[1, 2], [3, 4]]) In [70]: pprint(X) ⎡1 2⎤ ⎢ ⎥ ⎣3 4⎦ In [71]: w = Trace(X) + Trace(2*X) In [72]: pprint(w) Trace(Matrix([ + Trace(Matrix([ [1, 2], [2, 4], [3, 4]])) [6, 8]])) ```` I suppose `pprint(Trace(X))` should be: ```` ⎛⎡1 2⎤⎞ tr⎜⎢ ⎥⎟ ⎝⎣3 4⎦⎠ ````
sympy/sympy
diff --git a/sympy/combinatorics/tests/test_perm_groups.py b/sympy/combinatorics/tests/test_perm_groups.py index 3f45083ba7..d8f7118cda 100644 --- a/sympy/combinatorics/tests/test_perm_groups.py +++ b/sympy/combinatorics/tests/test_perm_groups.py @@ -60,7 +60,6 @@ def test_order(): b = Permutation([2, 1, 3, 4, 5, 6, 7, 8, 9, 0]) g = PermutationGroup([a, b]) assert g.order() == 1814400 - assert PermutationGroup().order() == 1 def test_equality(): @@ -274,10 +273,6 @@ def test_is_normal(): assert G1.is_subgroup(G6) assert not G1.is_subgroup(G4) assert G2.is_subgroup(G4) - s4 = PermutationGroup(Permutation(0, 1, 2, 3), Permutation(3)(0, 1)) - s6 = PermutationGroup(Permutation(0, 1, 2, 3, 5), Permutation(5)(0, 1)) - assert s6.is_normal(s4, strict=False) - assert not s4.is_normal(s6, strict=False) def test_eq(): @@ -722,7 +717,3 @@ def test_elements(): def test_is_group(): assert PermutationGroup(Permutation(1,2), Permutation(2,4)).is_group == True assert SymmetricGroup(4).is_group == True - - -def test_PermutationGroup(): - assert PermutationGroup() == PermutationGroup(Permutation()) diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py index 0724493609..25a0e30c75 100644 --- a/sympy/printing/pretty/tests/test_pretty.py +++ b/sympy/printing/pretty/tests/test_pretty.py @@ -4,7 +4,7 @@ FiniteSet, Function, Ge, Gt, I, Implies, Integral, Lambda, Le, Limit, Lt, Matrix, Mul, Nand, Ne, Nor, Not, O, Or, Pow, Product, QQ, RR, Rational, Ray, rootof, RootSum, S, - Segment, Subs, Sum, Symbol, Tuple, Xor, ZZ, conjugate, + Segment, Subs, Sum, Symbol, Tuple, Trace, Xor, ZZ, conjugate, groebner, oo, pi, symbols, ilex, grlex, Range, Contains, SeqPer, SeqFormula, SeqAdd, SeqMul, Interval, Union, fourier_series, fps, Complement, FiniteSet, Interval, Intersection, Union) @@ -2627,6 +2627,39 @@ def test_Adjoint(): assert upretty(Transpose(Adjoint(X))) == \ u(" T\n⎛ †⎞ \n⎝X ⎠ ") +def test_pretty_Trace_issue_9044(): + X = Matrix([[1, 2], [3, 
4]]) + Y = Matrix([[2, 4], [6, 8]]) + ascii_str_1 = \ +"""\ + /[1 2]\\ +tr|[ ]| + \[3 4]/\ +""" + ucode_str_1 = \ +u("""\ + ⎛⎡1 2⎤⎞ +tr⎜⎢ ⎥⎟ + ⎝⎣3 4⎦⎠\ +""") + ascii_str_2 = \ +"""\ + /[1 2]\ /[2 4]\\ +tr|[ ]| + tr|[ ]| + \[3 4]/ \[6 8]/\ +""" + ucode_str_2 = \ +u("""\ + ⎛⎡1 2⎤⎞ ⎛⎡2 4⎤⎞ +tr⎜⎢ ⎥⎟ + tr⎜⎢ ⎥⎟ + ⎝⎣3 4⎦⎠ ⎝⎣6 8⎦⎠\ +""") + assert pretty(Trace(X)) == ascii_str_1 + assert upretty(Trace(X)) == ucode_str_1 + + assert pretty(Trace(X) + Trace(Y)) == ascii_str_2 + assert upretty(Trace(X) + Trace(Y)) == ucode_str_2 + def test_pretty_piecewise(): expr = Piecewise((x, x < 1), (x**2, True))
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 2, "test_score": 0 }, "num_modified_files": 3 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@8f5fc1b68da9930b65e362fa33a4d8cbeb17bdbf#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Trace_issue_9044" ]
[ "sympy/combinatorics/tests/test_perm_groups.py::test_generate", "sympy/combinatorics/tests/test_perm_groups.py::test_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_rank", "sympy/combinatorics/tests/test_perm_groups.py::test_elements" ]
[ "sympy/combinatorics/tests/test_perm_groups.py::test_has", "sympy/combinatorics/tests/test_perm_groups.py::test_order", "sympy/combinatorics/tests/test_perm_groups.py::test_equality", "sympy/combinatorics/tests/test_perm_groups.py::test_center", "sympy/combinatorics/tests/test_perm_groups.py::test_centralizer", "sympy/combinatorics/tests/test_perm_groups.py::test_coset_factor", "sympy/combinatorics/tests/test_perm_groups.py::test_orbits", "sympy/combinatorics/tests/test_perm_groups.py::test_is_normal", "sympy/combinatorics/tests/test_perm_groups.py::test_eq", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_subgroup", "sympy/combinatorics/tests/test_perm_groups.py::test_is_solvable", "sympy/combinatorics/tests/test_perm_groups.py::test_rubik1", "sympy/combinatorics/tests/test_perm_groups.py::test_direct_product", "sympy/combinatorics/tests/test_perm_groups.py::test_orbit_rep", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_vector", "sympy/combinatorics/tests/test_perm_groups.py::test_random_pr", "sympy/combinatorics/tests/test_perm_groups.py::test_is_alt_sym", "sympy/combinatorics/tests/test_perm_groups.py::test_minimal_block", "sympy/combinatorics/tests/test_perm_groups.py::test_max_div", "sympy/combinatorics/tests/test_perm_groups.py::test_is_primitive", "sympy/combinatorics/tests/test_perm_groups.py::test_random_stab", "sympy/combinatorics/tests/test_perm_groups.py::test_transitivity_degree", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_random", "sympy/combinatorics/tests/test_perm_groups.py::test_baseswap", "sympy/combinatorics/tests/test_perm_groups.py::test_schreier_sims_incremental", "sympy/combinatorics/tests/test_perm_groups.py::test_subgroup_search", "sympy/combinatorics/tests/test_perm_groups.py::test_normal_closure", "sympy/combinatorics/tests/test_perm_groups.py::test_derived_series", "sympy/combinatorics/tests/test_perm_groups.py::test_lower_central_series", 
"sympy/combinatorics/tests/test_perm_groups.py::test_commutator", "sympy/combinatorics/tests/test_perm_groups.py::test_is_nilpotent", "sympy/combinatorics/tests/test_perm_groups.py::test_is_trivial", "sympy/combinatorics/tests/test_perm_groups.py::test_pointwise_stabilizer", "sympy/combinatorics/tests/test_perm_groups.py::test_make_perm", "sympy/combinatorics/tests/test_perm_groups.py::test_is_group", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ascii_str", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_unicode_str", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_greek", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_multiindex", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_sub_super", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_subs_missing_in_24", "sympy/printing/pretty/tests/test_pretty.py::test_upretty_modifiers", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Cycle", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_basic", "sympy/printing/pretty/tests/test_pretty.py::test_negative_fractions", "sympy/printing/pretty/tests/test_pretty.py::test_issue_5524", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ordering", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_relational", "sympy/printing/pretty/tests/test_pretty.py::test_Assignment", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7117", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_rational", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_functions", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt_char_knob", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sqrt_longsymbol_no_sqrt_char", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_KroneckerDelta", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_product", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_lambda", 
"sympy/printing/pretty/tests/test_pretty.py::test_pretty_order", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_derivatives", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_integrals", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_matrix", "sympy/printing/pretty/tests/test_pretty.py::test_Adjoint", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_piecewise", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_seq", "sympy/printing/pretty/tests/test_pretty.py::test_any_object_in_sequence", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sets", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ConditionSet", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ComplexRegion", "sympy/printing/pretty/tests/test_pretty.py::test_ProductSet_paranthesis", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sequences", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_FourierSeries", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_FormalPowerSeries", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_limits", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_ComplexRootOf", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_RootSum", "sympy/printing/pretty/tests/test_pretty.py::test_GroebnerBasis", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Boolean", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Domain", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_prec", "sympy/printing/pretty/tests/test_pretty.py::test_pprint", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_class", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_no_wrap_line", "sympy/printing/pretty/tests/test_pretty.py::test_settings", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_sum", "sympy/printing/pretty/tests/test_pretty.py::test_units", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Subs", 
"sympy/printing/pretty/tests/test_pretty.py::test_gammas", "sympy/printing/pretty/tests/test_pretty.py::test_hyper", "sympy/printing/pretty/tests/test_pretty.py::test_meijerg", "sympy/printing/pretty/tests/test_pretty.py::test_noncommutative", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_special_functions", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_geometry", "sympy/printing/pretty/tests/test_pretty.py::test_expint", "sympy/printing/pretty/tests/test_pretty.py::test_elliptic_functions", "sympy/printing/pretty/tests/test_pretty.py::test_RandomDomain", "sympy/printing/pretty/tests/test_pretty.py::test_PrettyPoly", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6285", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6359", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6739", "sympy/printing/pretty/tests/test_pretty.py::test_complicated_symbol_unchanged", "sympy/printing/pretty/tests/test_pretty.py::test_categories", "sympy/printing/pretty/tests/test_pretty.py::test_PrettyModules", "sympy/printing/pretty/tests/test_pretty.py::test_QuotientRing", "sympy/printing/pretty/tests/test_pretty.py::test_Homomorphism", "sympy/printing/pretty/tests/test_pretty.py::test_Tr", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Add", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7179", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7180", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Complement", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_SymmetricDifference", "sympy/printing/pretty/tests/test_pretty.py::test_pretty_Contains", "sympy/printing/pretty/tests/test_pretty.py::test_issue_8292", "sympy/printing/pretty/tests/test_pretty.py::test_issue_4335", "sympy/printing/pretty/tests/test_pretty.py::test_issue_8344", "sympy/printing/pretty/tests/test_pretty.py::test_issue_6324", "sympy/printing/pretty/tests/test_pretty.py::test_issue_7927", 
"sympy/printing/pretty/tests/test_pretty.py::test_issue_6134", "sympy/printing/pretty/tests/test_pretty.py::test_issue_9877" ]
[]
BSD
388
jupyter-incubator__sparkmagic-128
c3fd225a599345175240d8e96673102aacf3f624
2016-01-19 04:02:31
c3fd225a599345175240d8e96673102aacf3f624
diff --git a/remotespark/wrapperkernel/codetransformers.py b/remotespark/wrapperkernel/codetransformers.py index 41136ad..1cec56a 100644 --- a/remotespark/wrapperkernel/codetransformers.py +++ b/remotespark/wrapperkernel/codetransformers.py @@ -157,3 +157,14 @@ class LogsTransformer(UserCodeTransformerBase): code_to_run = "print('No logs yet.')" return code_to_run, error_to_show, begin_action, end_action, deletes_session + + +class PythonTransformer(UserCodeTransformerBase): + def get_code_to_execute(self, session_started, connection_string, force, output_var, command): + error_to_show = None + code_to_run = command + begin_action = Constants.do_nothing_action + end_action = Constants.do_nothing_action + deletes_session = False + + return code_to_run, error_to_show, begin_action, end_action, deletes_session diff --git a/remotespark/wrapperkernel/sparkkernelbase.py b/remotespark/wrapperkernel/sparkkernelbase.py index 60afcd6..583ed66 100644 --- a/remotespark/wrapperkernel/sparkkernelbase.py +++ b/remotespark/wrapperkernel/sparkkernelbase.py @@ -125,6 +125,8 @@ class SparkKernelBase(IPythonKernel): return CleanUpTransformer(subcommand) elif subcommand == UserCommandParser.logs_command: return LogsTransformer(subcommand) + elif subcommand == UserCommandParser.local_command: + return PythonTransformer(subcommand) else: return NotSupportedTransformer(subcommand) diff --git a/remotespark/wrapperkernel/usercommandparser.py b/remotespark/wrapperkernel/usercommandparser.py index 378fb5a..538f8ff 100644 --- a/remotespark/wrapperkernel/usercommandparser.py +++ b/remotespark/wrapperkernel/usercommandparser.py @@ -12,6 +12,7 @@ class UserCommandParser(object): delete_command = "delete" clean_up_command = "cleanup" logs_command = "logs" + local_command = "local" def __init__(self): """Code can have a magic or no magic specified (specified with %word sign). If no magic is specified, %run will
Expose %python for wrapper kernels So that user can execute custom python code, maybe in conjunction with -o for %sql results.
jupyter-incubator/sparkmagic
diff --git a/tests/test_codetransformers.py b/tests/test_codetransformers.py index c03390e..698a763 100644 --- a/tests/test_codetransformers.py +++ b/tests/test_codetransformers.py @@ -227,3 +227,17 @@ def test_logs_transformer_no_session(): assert_equals(begin_action, Constants.do_nothing_action) assert_equals(end_action, Constants.do_nothing_action) assert_equals(deletes_session, False) + + +@with_setup(_setup, _teardown) +def test_python_transformer(): + transformer = PythonTransformer("command") + + code_to_run, error_to_show, begin_action, end_action, deletes_session = \ + transformer.get_code_to_execute(False, conn, False, None, code) + + assert_equals(code, code_to_run) + assert error_to_show is None + assert_equals(begin_action, Constants.do_nothing_action) + assert_equals(end_action, Constants.do_nothing_action) + assert_equals(deletes_session, False) diff --git a/tests/test_sparkkernelbase.py b/tests/test_sparkkernelbase.py index b5e2e8f..4ec7916 100644 --- a/tests/test_sparkkernelbase.py +++ b/tests/test_sparkkernelbase.py @@ -129,6 +129,7 @@ def test_returns_right_transformer(): assert type(kernel._get_code_transformer(UserCommandParser.clean_up_command)) is CleanUpTransformer assert type(kernel._get_code_transformer(UserCommandParser.logs_command)) is LogsTransformer assert type(kernel._get_code_transformer("whatever")) is NotSupportedTransformer + assert type(kernel._get_code_transformer(UserCommandParser.local_command)) is PythonTransformer @with_setup(_setup, _teardown)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 3 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": [ "mkdir ~/.sparkmagic", "cp remotespark/default_config.json ~/.sparkmagic/config.json" ], "python": "3.7", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
anyio==3.7.1 argon2-cffi==23.1.0 argon2-cffi-bindings==21.2.0 attrs==24.2.0 beautifulsoup4==4.13.3 bleach==6.0.0 certifi @ file:///croot/certifi_1671487769961/work/certifi cffi==1.15.1 charset-normalizer==3.4.1 comm==0.1.4 decorator==5.1.1 defusedxml==0.7.1 entrypoints==0.4 exceptiongroup==1.2.2 fastjsonschema==2.21.1 idna==3.10 importlib-metadata==6.7.0 importlib-resources==5.12.0 iniconfig==2.0.0 ipykernel==4.2.2 ipython==4.0.2 ipython-genutils==0.2.0 ipywidgets==7.8.5 Jinja2==3.1.6 jsonschema==4.17.3 jupyter-server==1.24.0 jupyter_client==7.4.9 jupyter_core==4.12.0 jupyterlab-pygments==0.2.2 jupyterlab_widgets==1.1.11 MarkupSafe==2.1.5 mistune==3.0.2 mock==5.2.0 nbclassic==1.2.0 nbclient==0.7.4 nbconvert==7.6.0 nbformat==5.8.0 nest-asyncio==1.6.0 nose==1.3.7 notebook==6.5.7 notebook_shim==0.2.4 numpy==1.21.6 packaging==24.0 pandas==1.3.5 pandocfilters==1.5.1 pexpect==4.9.0 pickleshare==0.7.5 pkgutil_resolve_name==1.3.10 plotly==1.9.4 pluggy==1.2.0 prometheus-client==0.17.1 ptyprocess==0.7.0 pycparser==2.21 Pygments==2.17.2 pyrsistent==0.19.3 pytest==7.4.4 python-dateutil==2.9.0.post0 pytz==2025.2 pyzmq==26.2.1 -e git+https://github.com/jupyter-incubator/sparkmagic.git@c3fd225a599345175240d8e96673102aacf3f624#egg=remotespark requests==2.31.0 Send2Trash==1.8.3 simplegeneric==0.8.1 six==1.17.0 sniffio==1.3.1 soupsieve==2.4.1 terminado==0.17.1 tinycss2==1.2.1 tomli==2.0.1 tornado==6.2 traitlets==5.9.0 typing_extensions==4.7.1 urllib3==2.0.7 webencodings==0.5.1 websocket-client==1.6.1 widgetsnbextension==3.6.10 zipp==3.15.0
name: sparkmagic channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - anyio==3.7.1 - argon2-cffi==23.1.0 - argon2-cffi-bindings==21.2.0 - attrs==24.2.0 - beautifulsoup4==4.13.3 - bleach==6.0.0 - cffi==1.15.1 - charset-normalizer==3.4.1 - comm==0.1.4 - decorator==5.1.1 - defusedxml==0.7.1 - entrypoints==0.4 - exceptiongroup==1.2.2 - fastjsonschema==2.21.1 - idna==3.10 - importlib-metadata==6.7.0 - importlib-resources==5.12.0 - iniconfig==2.0.0 - ipykernel==4.2.2 - ipython==4.0.2 - ipython-genutils==0.2.0 - ipywidgets==7.8.5 - jinja2==3.1.6 - jsonschema==4.17.3 - jupyter-client==7.4.9 - jupyter-core==4.12.0 - jupyter-server==1.24.0 - jupyterlab-pygments==0.2.2 - jupyterlab-widgets==1.1.11 - markupsafe==2.1.5 - mistune==3.0.2 - mock==5.2.0 - nbclassic==1.2.0 - nbclient==0.7.4 - nbconvert==7.6.0 - nbformat==5.8.0 - nest-asyncio==1.6.0 - nose==1.3.7 - notebook==6.5.7 - notebook-shim==0.2.4 - numpy==1.21.6 - packaging==24.0 - pandas==1.3.5 - pandocfilters==1.5.1 - pexpect==4.9.0 - pickleshare==0.7.5 - pkgutil-resolve-name==1.3.10 - plotly==1.9.4 - pluggy==1.2.0 - prometheus-client==0.17.1 - ptyprocess==0.7.0 - pycparser==2.21 - pygments==2.17.2 - pyrsistent==0.19.3 - pytest==7.4.4 - python-dateutil==2.9.0.post0 - pytz==2025.2 - pyzmq==26.2.1 - requests==2.31.0 - send2trash==1.8.3 - simplegeneric==0.8.1 - six==1.17.0 - sniffio==1.3.1 
- soupsieve==2.4.1 - terminado==0.17.1 - tinycss2==1.2.1 - tomli==2.0.1 - tornado==6.2 - traitlets==5.9.0 - typing-extensions==4.7.1 - urllib3==2.0.7 - webencodings==0.5.1 - websocket-client==1.6.1 - widgetsnbextension==3.6.10 - zipp==3.15.0 prefix: /opt/conda/envs/sparkmagic
[ "tests/test_codetransformers.py::test_python_transformer", "tests/test_sparkkernelbase.py::test_returns_right_transformer" ]
[]
[ "tests/test_codetransformers.py::test_not_supported_transformer", "tests/test_codetransformers.py::test_config_transformer_no_session", "tests/test_codetransformers.py::test_config_transformer_session_no_flag", "tests/test_codetransformers.py::test_config_transformer_session_flag", "tests/test_codetransformers.py::test_spark_transformer", "tests/test_codetransformers.py::test_sql_transformer", "tests/test_codetransformers.py::test_sql_transformer_output_var", "tests/test_codetransformers.py::test_hive_transformer", "tests/test_codetransformers.py::test_info_transformer", "tests/test_codetransformers.py::test_delete_transformer_no_force", "tests/test_codetransformers.py::test_delete_transformer_force", "tests/test_codetransformers.py::test_cleanup_transformer_no_force", "tests/test_codetransformers.py::test_cleanup_transformer_force", "tests/test_codetransformers.py::test_logs_transformer_session", "tests/test_codetransformers.py::test_logs_transformer_no_session", "tests/test_sparkkernelbase.py::test_get_config", "tests/test_sparkkernelbase.py::test_get_config_not_set", "tests/test_sparkkernelbase.py::test_get_config_not_set_empty_strings", "tests/test_sparkkernelbase.py::test_initialize_magics", "tests/test_sparkkernelbase.py::test_start_session", "tests/test_sparkkernelbase.py::test_delete_session", "tests/test_sparkkernelbase.py::test_instructions_from_parser_are_passed_to_transformer", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_code", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_error", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_deletes_session", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_begin_start", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_begin_delete", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_end_start", 
"tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_end_delete", "tests/test_sparkkernelbase.py::test_instructions_from_transformer_are_executed_begin_start_end_delete", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happened", "tests/test_sparkkernelbase.py::test_execute_throws_if_fatal_error_happens_for_execution", "tests/test_sparkkernelbase.py::test_shutdown_cleans_up", "tests/test_sparkkernelbase.py::test_register_auto_viz" ]
[]
Modified BSD License
389
wong2__pick-3
38bab1f33ff03936906435d5458765493e4c2c1c
2016-01-20 08:40:03
38bab1f33ff03936906435d5458765493e4c2c1c
diff --git a/example/scroll.py b/example/scroll.py new file mode 100644 index 0000000..6f34b22 --- /dev/null +++ b/example/scroll.py @@ -0,0 +1,10 @@ +#-*-coding:utf-8-*- + +from __future__ import print_function + +from pick import pick + +title = 'Select:' +options = ['foo.bar%s.baz' % x for x in range(1, 71)] +option, index = pick(options, title) +print(option, index) diff --git a/pick/__init__.py b/pick/__init__.py index 1f8ccfc..f004b84 100644 --- a/pick/__init__.py +++ b/pick/__init__.py @@ -49,14 +49,13 @@ class Picker(object): """ return self.options[self.index], self.index - def draw(self): - """draw the curses ui on the screen""" - self.screen.clear() - - x, y = 1, 1 + def get_title_lines(self): if self.title: - self.screen.addstr(y, x, self.title) - y += 2 + return self.title.split('\n') + [''] + return [] + + def get_option_lines(self): + lines = [] for index, option in enumerate(self.options): if index == self.index: @@ -64,13 +63,53 @@ class Picker(object): else: prefix = len(self.indicator) * ' ' line = '{0} {1}'.format(prefix, option) + lines.append(line) + + return lines + + def get_lines(self): + title_lines = self.get_title_lines() + option_lines = self.get_option_lines() + lines = title_lines + option_lines + current_line = self.index + len(title_lines) + 1 + return lines, current_line + + def draw(self): + """draw the curses ui on the screen, handle scroll if needed""" + self.screen.clear() + + x, y = 1, 1 # start point + max_y, max_x = self.screen.getmaxyx() + max_rows = max_y - y # the max rows we can draw + + lines, current_line = self.get_lines() + + # calculate how many lines we should scroll, relative to the top + scroll_top = getattr(self, 'scroll_top', 0) + if current_line <= scroll_top: + scroll_top = 0 + elif current_line - scroll_top > max_rows: + scroll_top = current_line - max_rows + self.scroll_top = scroll_top + + lines_to_draw = lines[scroll_top:scroll_top+max_rows] + + for line in lines_to_draw: self.screen.addstr(y, x, line) y 
+= 1 self.screen.refresh() - def start(self): - return curses.wrapper(self.run_loop) + def run_loop(self): + while True: + self.draw() + c = self.screen.getch() + if c in KEYS_UP: + self.move_up() + elif c in KEYS_DOWN: + self.move_down() + elif c in KEYS_ENTER: + return self.get_selected() def config_curses(self): # use the default colors of the terminal @@ -78,21 +117,13 @@ class Picker(object): # hide the cursor curses.curs_set(0) - def run_loop(self, screen): - self.config_curses() + def _start(self, screen): self.screen = screen - self.draw() + self.config_curses() + return self.run_loop() - while True: - c = self.screen.getch() - if c in KEYS_UP: - self.move_up() - self.draw() - elif c in KEYS_DOWN: - self.move_down() - self.draw() - elif c in KEYS_ENTER: - return self.get_selected() + def start(self): + return curses.wrapper(self._start) def pick(options, title=None, indicator='*', default_index=0):
Long lists issue I have an issue with `pick` while passing a long list, here is an example you can try it: ```python from pick import pick title = 'Select: ' options = ['foo.bar1.baz', 'foo.bar2.baz', 'foo.bar3.baz', 'foo.bar4.baz', 'foo.bar5.baz', 'foo.bar6.baz', 'foo.bar7.baz','foo.bar8.baz', 'foo.bar9.baz', 'foo.bar10.baz','foo.bar11.baz', 'foo.bar12.baz', 'foo.bar13.baz', 'foo.bar14.baz', 'foo.bar15.baz', 'foo.bar16.baz', 'foo.bar17.baz','foo.bar18.baz', 'foo.bar19.baz', 'foo.bar20.baz','foo.bar21.baz', 'foo.bar22.baz', 'foo.bar23.baz', 'foo.bar24.baz', 'foo.bar25.baz', 'foo.bar26.baz', 'foo.bar27.baz','foo.bar28.baz', 'foo.bar29.baz', 'foo.bar30.baz','foo.bar31.baz', 'foo.bar32.baz', 'foo.bar33.baz', 'foo.bar34.baz', 'foo.bar35.baz', 'foo.bar36.baz', 'foo.bar37.baz','foo.bar38.baz', 'foo.bar39.baz', 'foo.bar40.baz','foo.bar41.baz', 'foo.bar42.baz', 'foo.bar43.baz', 'foo.bar44.baz', 'foo.bar45.baz', 'foo.bar46.baz', 'foo.bar47.baz','foo.bar48.baz', 'foo.bar49.baz', 'foo.bar50.baz','foo.bar51.baz', 'foo.bar52.baz', 'foo.bar53.baz', 'foo.bar54.baz', 'foo.bar55.baz', 'foo.bar56.baz', 'foo.bar57.baz','foo.bar58.baz', 'foo.bar59.baz', 'foo.bar60.baz'] option, index = pick(options, title) ``` the result will be: ``` Traceback (most recent call last): File "pick_test.py", line 5, in <module> option, index = pick(options, title) File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 109, in pick return picker.start() File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 73, in start return curses.wrapper(self.run_loop) File "/usr/lib64/python2.7/curses/wrapper.py", line 43, in wrapper return func(stdscr, *args, **kwds) File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 84, in run_loop self.draw() File "/usr/lib/python2.7/site-packages/pick/__init__.py", line 67, in draw self.screen.addstr(y, x, line) _curses.error: addstr() returned ERR ```
wong2/pick
diff --git a/tests/test_pick.py b/tests/test_pick.py index f5b7dd4..21eecc4 100644 --- a/tests/test_pick.py +++ b/tests/test_pick.py @@ -1,12 +1,12 @@ #-*-coding:utf-8-*- import unittest -from pick import pick, Picker +from pick import Picker class TestPick(unittest.TestCase): - def test_pick(self): + def test_move_up_down(self): title = 'Please choose an option: ' options = ['option1', 'option2', 'option3'] picker = Picker(options, title) @@ -16,6 +16,26 @@ class TestPick(unittest.TestCase): picker.move_down() assert picker.get_selected() == ('option2', 1) + def test_default_index(self): + title = 'Please choose an option: ' + options = ['option1', 'option2', 'option3'] + picker = Picker(options, title, default_index=1) + assert picker.get_selected() == ('option2', 1) + + def test_get_lines(self): + title = 'Please choose an option: ' + options = ['option1', 'option2', 'option3'] + picker = Picker(options, title, indicator='*') + lines, current_line = picker.get_lines() + assert lines == [title, '', '* option1', ' option2', ' option3'] + assert current_line == 3 + + def test_no_title(self): + options = ['option1', 'option2', 'option3'] + picker = Picker(options) + lines, current_line = picker.get_lines() + assert current_line == 1 + if __name__ == '__main__': unittest.main()
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 1 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "nose", "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work nose==1.3.7 packaging @ file:///croot/packaging_1734472117206/work -e git+https://github.com/wong2/pick.git@38bab1f33ff03936906435d5458765493e4c2c1c#egg=pick pluggy @ file:///croot/pluggy_1733169602837/work pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: pick channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - nose==1.3.7 prefix: /opt/conda/envs/pick
[ "tests/test_pick.py::TestPick::test_get_lines", "tests/test_pick.py::TestPick::test_no_title" ]
[]
[ "tests/test_pick.py::TestPick::test_default_index", "tests/test_pick.py::TestPick::test_move_up_down" ]
[]
MIT License
390
phobson__paramnormal-24
9e0395b03e042d0ad4f0d6be12c768103233dc27
2016-01-21 15:07:30
9e0395b03e042d0ad4f0d6be12c768103233dc27
diff --git a/docs/tutorial/fitting.ipynb b/docs/tutorial/fitting.ipynb index b5434fa..364bccb 100644 --- a/docs/tutorial/fitting.ipynb +++ b/docs/tutorial/fitting.ipynb @@ -11,6 +11,17 @@ "Again, we'll demonstrate with a lognormal distribution and compare parameter estimatation with scipy." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, { "cell_type": "code", "execution_count": null, @@ -28,9 +39,7 @@ "clean_bkgd = {'axes.facecolor':'none', 'figure.facecolor':'none'}\n", "seaborn.set(style='ticks', rc=clean_bkgd)\n", "\n", - "import paramnormal\n", - "\n", - "%matplotlib inline" + "import paramnormal" ] }, { diff --git a/docs/tutorial/overview.ipynb b/docs/tutorial/overview.ipynb index 26e7506..d3f0fc6 100644 --- a/docs/tutorial/overview.ipynb +++ b/docs/tutorial/overview.ipynb @@ -11,6 +11,17 @@ "The main problem that `paramnormal` is trying to solve is that sometimes, creating a probability distribution using these parameters (and others) in `scipy.stats` can be confusing. Also the parameters in `numpy.random` can be inconsistently named (admittedly, just a minor inconvenience). 
" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "%matplotlib inline" + ] + }, { "cell_type": "code", "execution_count": null, @@ -147,11 +158,7 @@ }, "outputs": [], "source": [ - "distributions = filter(\n", - " lambda d: not d.startswith('__') and d not in ['BaseDist_Mixin', 'stats', 'namedtuple', 'numpy', 'utils'], \n", - " dir(paramnormal.paramnormal)\n", - ")\n", - "for d in distributions:\n", + "for d in paramnormal.paramnormal.__all__:\n", " print(d)" ] }, diff --git a/paramnormal/paramnormal.py b/paramnormal/paramnormal.py index 6faae1a..7228ea0 100644 --- a/paramnormal/paramnormal.py +++ b/paramnormal/paramnormal.py @@ -439,7 +439,7 @@ class beta(BaseDist_Mixin): >>> # silly fake data >>> numpy.random.seed(0) - >>> pn.beta(alpha=2, beta=5).rvs(size=37) + >>> data = pn.beta(alpha=2, beta=5).rvs(size=37) >>> # pretend `data` is unknown and we want to fit a dist. to it >>> pn.beta.fit(data) params(alpha=1.6784891179355, beta=4.2459121691279, loc=0, scale=1) @@ -526,7 +526,7 @@ class gamma(BaseDist_Mixin): >>> # silly fake data >>> numpy.random.seed(0) - >>> pn.gamma(k=2, θ=5).rvs(size=37) + >>> data = pn.gamma(k=2, θ=5).rvs(size=37) >>> # pretend `data` is unknown and we want to fit a dist. to it >>> pn.gamma.fit(data) params(k=1.3379069223213478, loc=0, theta=7.5830062081633587) @@ -607,7 +607,7 @@ class chi_squared(BaseDist_Mixin): >>> # silly fake data >>> numpy.random.seed(0) - >>> pn.chi_squared(k=2).rvs(size=37) + >>> data = pn.chi_squared(k=2).rvs(size=37) >>> # pretend `data` is unknown and we want to fit a dist. to it >>> pn.chi_squared.fit(data) params(k=2.2668945312500028, loc=0, scale=1) @@ -652,8 +652,8 @@ class pareto(BaseDist_Mixin): Use scipy's maximum likelihood estimation methods to estimate the parameters of the data's distribution. By default, `loc` and `scale` are fixed at 0 and 1, respectively. 
Thus, only - `alpha` and `beta` are estimated unless `loc` or `scale` are - explicitly set to `None`. + `alpha` is estimated unless `loc` or `scale` are explicitly + set to `None`. from_params(params) Create a new distribution instances from the namedtuple result of the :meth:`~fit` method. @@ -668,7 +668,7 @@ class pareto(BaseDist_Mixin): respectively. .. note :: - When fitting a beta distribution to a dataset, this will + When fitting a pareto distribution to a dataset, this will be fixed at its default value unless you explicitly set it to other values. Set to `None` if you wish that it be estimated entirely from scratch. @@ -688,7 +688,7 @@ class pareto(BaseDist_Mixin): >>> # silly fake data >>> numpy.random.seed(0) - >>> pn.pareto(alpha=2).rvs(size=37) + >>> data = pn.pareto(alpha=2).rvs(size=37) >>> # pretend `data` is unknown and we want to fit a dist. to it >>> pn.pareto.fit(data) params(alpha=1.7850585937500019, loc=0, scale=1) @@ -699,7 +699,7 @@ class pareto(BaseDist_Mixin): References ---------- - http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2.html + http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pareto.html https://en.wikipedia.org/wiki/pareto_distribution See Also @@ -721,3 +721,95 @@ class pareto(BaseDist_Mixin): else: key = 'b' return {key: alpha, loc_key: loc, scale_key: scale} + + +class exponential(BaseDist_Mixin): + """ + Create and fit data to an exponential distribution. + + Methods + ------- + fit(data, **guesses) + Use scipy's maximum likelihood estimation methods to estimate + the parameters of the data's distribution. By default, `loc` + isfixed at 0. Thus, only `lambda_` is estimated unless `loc` is + explicitly set to `None`. + from_params(params) + Create a new distribution instances from the namedtuple result + of the :meth:`~fit` method. + + Parameters + ---------- + lambda_ : float + The shape parameter of the distribution. + loc : float, optional + Location parameter of the distribution. 
This default to, and + should probably be left at, 0, + + .. note :: + When fitting an exponential distribution to a dataset, this + will be fixed at its default value unless you explicitly set + it to other values. Set to `None` if you wish that it be + estimated entirely from scratch. + + Examples + -------- + >>> import numpy + >>> import paramnormal as pn + >>> numpy.random.seed(0) + >>> pn.exponential(lambda_=2).rvs(size=3) + array([ 0.39793725, 0.62796538, 0.46161157]) + + >>> # you can also use greek letters + >>> numpy.random.seed(0) + >>> pn.exponential(λ=2).rvs(size=3) + array([ 0.39793725, 0.62796538, 0.46161157]) + + >>> # silly fake data + >>> numpy.random.seed(0) + >>> data = pn.exponential(λ=2).rvs(size=37) + >>> # pretend `data` is unknown and we want to fit a dist. to it + >>> pn.exponential.fit(data) + params(lambda_=1.7849050026146085, loc=0) + + >>> # include `loc` in the estimate + >>> pn.exponential.fit(data, loc=None) + params(lambda_=1.8154701618164411, loc=0.0094842718426853996) + + References + ---------- + http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html + https://en.wikipedia.org/wiki/exponential_distribution + + See Also + -------- + scipy.stats.expon + numpy.random.exponential + + """ + dist = stats.expon + param_template = namedtuple('params', ['lambda_', 'loc']) + + @staticmethod + @utils.greco_deco + def _process_args(lambda_=None, loc=0, fit=False): + loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) + return {loc_key: loc, scale_key: lambda_**-1 if lambda_ is not None else lambda_} + + @classmethod + def fit(cls, data, **guesses): + params = cls._fit(data, **guesses) + return cls.param_template(loc=params[0], lambda_=params[1]**-1) + + +__all__ = [ + 'normal', + 'lognormal', + 'weibull', + 'alpha', + 'beta', + 'gamma', + 'chi_squared', + 'pareto', + 'exponential', +] \ No newline at end of file diff --git a/paramnormal/utils.py b/paramnormal/utils.py index fd3df0e..da50755 100644 --- 
a/paramnormal/utils.py +++ b/paramnormal/utils.py @@ -8,7 +8,8 @@ SYMBOLS = { 'α': 'alpha', 'β': 'beta', 'γ': 'gamma', - 'θ': 'theta' + 'λ': 'lambda_', + 'θ': 'theta', }
add exponential distribution seems pretty critical
phobson/paramnormal
diff --git a/paramnormal/tests/test_paramnormal.py b/paramnormal/tests/test_paramnormal.py index 8fde0c3..73de100 100644 --- a/paramnormal/tests/test_paramnormal.py +++ b/paramnormal/tests/test_paramnormal.py @@ -342,3 +342,36 @@ class Test_pareto(CheckDist_Mixin): (params.loc, 0), (params.scale, 1), ) + + +class Test_exponential(CheckDist_Mixin): + def setup(self): + self.dist = paramnormal.exponential + self.cargs = [] + self.ckwds = dict(lambda_=2) + + self.np_rand_fxn = numpy.random.exponential + self.npargs = [0.5] + self.npkwds = dict() + + def test_process_args(self): + nt.assert_dict_equal( + self.dist._process_args(lambda_=2.0), + dict(loc=0, scale=0.5) + ) + + nt.assert_dict_equal( + self.dist._process_args(lambda_=2.0, fit=True), + dict(floc=0, fscale=0.5) + ) + + @seed + def test_fit(self): + data = numpy.random.exponential(0.5, size=37) + params = self.dist.fit(data) + check_params( + (params.lambda_, 1.7849050026146085), + (params.loc, 0), + ) + +
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 3, "test_score": 0 }, "num_modified_files": 4 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "nose", "pytest" ], "pre_install": [], "python": "3.5", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 importlib-metadata==4.8.3 iniconfig==1.1.1 nose==1.3.7 numpy==1.19.5 packaging==21.3 -e git+https://github.com/phobson/paramnormal.git@9e0395b03e042d0ad4f0d6be12c768103233dc27#egg=paramnormal pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 scipy==1.5.4 tomli==1.2.3 typing_extensions==4.1.1 zipp==3.6.0
name: paramnormal channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - nose==1.3.7 - numpy==1.19.5 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - scipy==1.5.4 - tomli==1.2.3 - typing-extensions==4.1.1 - zipp==3.6.0 prefix: /opt/conda/envs/paramnormal
[ "paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_exponential::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_exponential::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_exponential::test_process_args" ]
[ "paramnormal/tests/test_paramnormal.py::Test_exponential::test_fit" ]
[ "paramnormal/tests/test_paramnormal.py::Test_normal::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_normal::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_normal::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_normal::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_normal::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_normal::test_processargs", "paramnormal/tests/test_paramnormal.py::Test_normal::test_fit", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_process_args_no_offset", "paramnormal/tests/test_paramnormal.py::Test_lognormal::test_fit", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_weibull::test_fit", "paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_alpha::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_alpha::test_from_params", 
"paramnormal/tests/test_paramnormal.py::Test_alpha::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_alpha::test_fit", "paramnormal/tests/test_paramnormal.py::Test_beta::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_beta::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_beta::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_beta::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_beta::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_beta::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_beta::test_fit", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_gamma::test_fit", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_from_params", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_chi_squared::test_fit", "paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_0010", "paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_0037", "paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_0100", "paramnormal/tests/test_paramnormal.py::Test_pareto::test_random_3737", "paramnormal/tests/test_paramnormal.py::Test_pareto::test_from_params", 
"paramnormal/tests/test_paramnormal.py::Test_pareto::test_process_args", "paramnormal/tests/test_paramnormal.py::Test_pareto::test_fit" ]
[]
MIT License
391
cdent__wsgi-intercept-35
3048a0921675822f5f27567d63ecca094077dc25
2016-01-21 19:21:32
3048a0921675822f5f27567d63ecca094077dc25
diff --git a/wsgi_intercept/__init__.py b/wsgi_intercept/__init__.py index 487cd76..b659d75 100644 --- a/wsgi_intercept/__init__.py +++ b/wsgi_intercept/__init__.py @@ -50,6 +50,11 @@ Note especially that ``app_create_fn`` is a *function object* returning a WSGI application; ``script_name`` becomes ``SCRIPT_NAME`` in the WSGI app's environment, if set. +Note also that if ``http_proxy`` or ``https_proxy`` is set in the environment +this can cause difficulties with some of the intercepted libraries. If +requests or urllib is being used, these will raise an exception if one of +those variables is set. + Install ======= diff --git a/wsgi_intercept/requests_intercept.py b/wsgi_intercept/requests_intercept.py index cdd304a..586b752 100644 --- a/wsgi_intercept/requests_intercept.py +++ b/wsgi_intercept/requests_intercept.py @@ -1,6 +1,7 @@ """Intercept HTTP connections that use `requests <http://docs.python-requests.org/en/latest/>`_. """ +import os import sys from . import WSGI_HTTPConnection, WSGI_HTTPSConnection, wsgi_fake_socket @@ -32,6 +33,9 @@ class HTTPS_WSGIInterceptor(WSGI_HTTPSConnection, HTTPSConnection): def install(): + if 'http_proxy' in os.environ or 'https_proxy' in os.environ: + raise RuntimeError( + 'http_proxy or https_proxy set in environment, please unset') HTTPConnectionPool.ConnectionCls = HTTP_WSGIInterceptor HTTPSConnectionPool.ConnectionCls = HTTPS_WSGIInterceptor diff --git a/wsgi_intercept/urllib_intercept.py b/wsgi_intercept/urllib_intercept.py index 3eca406..31d8f46 100644 --- a/wsgi_intercept/urllib_intercept.py +++ b/wsgi_intercept/urllib_intercept.py @@ -1,5 +1,8 @@ """Intercept HTTP connections that use urllib.request (Py3) aka urllib2 (Python 2). 
""" + +import os + try: import urllib.request as url_lib except ImportError: @@ -27,6 +30,9 @@ class WSGI_HTTPSHandler(url_lib.HTTPSHandler): def install_opener(): + if 'http_proxy' in os.environ or 'https_proxy' in os.environ: + raise RuntimeError( + 'http_proxy or https_proxy set in environment, please unset') handlers = [WSGI_HTTPHandler()] if WSGI_HTTPSHandler is not None: handlers.append(WSGI_HTTPSHandler())
kwarg error when $http_proxy is set in environ ```bash $ echo $http_proxy some_proxy.com:1234 ``` and run the [requests example](http://wsgi-intercept.readthedocs.org/en/latest/requests.html), I got this error: ``` Traceback (most recent call last): File "test.py", line 20, in <module> resp = requests.get(url) File "/usr/local/lib/python2.7/site-packages/requests/api.py", line 60, in get return request('get', url, **kwargs) File "/usr/local/lib/python2.7/site-packages/requests/api.py", line 49, in request return session.request(method=method, url=url, **kwargs) File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 457, in request resp = self.send(prep, **send_kwargs) File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 569, in send r = adapter.send(request, **kwargs) File "/usr/local/lib/python2.7/site-packages/requests/adapters.py", line 362, in send timeout=timeout File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 511, in urlopen conn = self._get_conn(timeout=pool_timeout) File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 231, in _get_conn return conn or self._new_conn() File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 192, in _new_conn strict=self.strict, **self.conn_kw) File "/usr/local/lib/python2.7/site-packages/wsgi_intercept/requests_intercept.py", line 22, in __init__ WSGI_HTTPConnection.__init__(self, *args, **kwargs) TypeError: __init__() got an unexpected keyword argument 'socket_options' ``` and the kwargs of wsgi_intercept/requests_intercept.py:HTTP_WSGIInterceptor.__init__ is ``` {'strict': False, 'host': 'some_proxy.com', 'socket_options': [], 'timeout': <object object at 0x1028490e0>, 'port': 1234} ```
cdent/wsgi-intercept
diff --git a/test/install.py b/test/install.py index 225e109..65262cd 100644 --- a/test/install.py +++ b/test/install.py @@ -1,9 +1,10 @@ +import os import wsgi_intercept class BaseInstalledApp(object): def __init__(self, app, host, port=80, script_name='', - install=None, uninstall=None): + install=None, uninstall=None, proxy=None): self.app = app self.host = host self.port = port @@ -12,6 +13,7 @@ class BaseInstalledApp(object): self._uninstall = uninstall or (lambda: None) self._hits = 0 self._internals = {} + self._proxy = proxy def __call__(self, environ, start_response): self._hits += 1 @@ -32,10 +34,14 @@ class BaseInstalledApp(object): wsgi_intercept.remove_wsgi_intercept(self.host, self.port) def install(self): + if self._proxy: + os.environ['http_proxy'] = self._proxy self._install() self.install_wsgi_intercept() def uninstall(self): + if self._proxy: + del os.environ['http_proxy'] self.uninstall_wsgi_intercept() self._uninstall() @@ -56,9 +62,9 @@ def installer_class(module=None, install=None, uninstall=None): uninstall = uninstall or getattr(module, 'uninstall', None) class InstalledApp(BaseInstalledApp): - def __init__(self, app, host, port=80, script_name=''): + def __init__(self, app, host, port=80, script_name='', proxy=None): BaseInstalledApp.__init__( self, app=app, host=host, port=port, script_name=script_name, - install=install, uninstall=uninstall) + install=install, uninstall=uninstall, proxy=proxy) return InstalledApp diff --git a/test/test_http_client.py b/test/test_http_client.py index 3ca09b4..0622f6e 100644 --- a/test/test_http_client.py +++ b/test/test_http_client.py @@ -42,6 +42,18 @@ def test_other(): assert app.success() +def test_proxy_handling(): + """Proxy variable no impact.""" + with InstalledApp(wsgi_app.simple_app, host=HOST, port=80, + proxy='some.host:1234') as app: + http_client = http_lib.HTTPConnection(HOST) + http_client.request('GET', '/') + content = http_client.getresponse().read() + http_client.close() + assert 
content == b'WSGI intercept successful!\n' + assert app.success() + + def test_app_error(): with InstalledApp(wsgi_app.raises_app, host=HOST, port=80): http_client = http_lib.HTTPConnection(HOST) diff --git a/test/test_httplib2.py b/test/test_httplib2.py index 9fa91d1..9a67e28 100644 --- a/test/test_httplib2.py +++ b/test/test_httplib2.py @@ -47,6 +47,17 @@ def test_bogus_domain(): 'httplib2_intercept.HTTP_WSGIInterceptorWithTimeout("_nonexistant_domain_").connect()') +def test_proxy_handling(): + """Proxy has no impact.""" + with InstalledApp(wsgi_app.simple_app, host=HOST, port=80, + proxy='some_proxy.com:1234') as app: + http = httplib2.Http() + resp, content = http.request( + 'http://some_hopefully_nonexistant_domain:80/') + assert content == b'WSGI intercept successful!\n' + assert app.success() + + def test_https(): with InstalledApp(wsgi_app.simple_app, host=HOST, port=443) as app: http = httplib2.Http() diff --git a/test/test_requests.py b/test/test_requests.py index 304d178..8005f93 100644 --- a/test/test_requests.py +++ b/test/test_requests.py @@ -1,3 +1,4 @@ +import os import py.test from wsgi_intercept import requests_intercept, WSGIAppError from test import wsgi_app @@ -40,6 +41,18 @@ def test_bogus_domain(): 'requests.get("http://_nonexistant_domain_")') +def test_proxy_handling(): + with py.test.raises(RuntimeError) as exc: + with InstalledApp(wsgi_app.simple_app, host=HOST, port=80, + proxy='some_proxy.com:1234'): + requests.get('http://some_hopefully_nonexistant_domain:80/') + assert 'http_proxy or https_proxy set in environment' in str(exc.value) + # We need to do this by hand because the exception was raised + # during the entry of the context manager, so the exit handler + # wasn't reached. 
+ del os.environ['http_proxy'] + + def test_https(): with InstalledApp(wsgi_app.simple_app, host=HOST, port=443) as app: resp = requests.get('https://some_hopefully_nonexistant_domain:443/') diff --git a/test/test_urllib.py b/test/test_urllib.py index 82daff7..83d793f 100644 --- a/test/test_urllib.py +++ b/test/test_urllib.py @@ -1,3 +1,4 @@ +import os import py.test from wsgi_intercept import urllib_intercept, WSGIAppError from test import wsgi_app @@ -32,6 +33,19 @@ def test_http_other_port(): assert environ['wsgi.url_scheme'] == 'http' +def test_proxy_handling(): + """Like requests, urllib gets confused about proxy early on.""" + with py.test.raises(RuntimeError) as exc: + with InstalledApp(wsgi_app.simple_app, host=HOST, port=80, + proxy='some.host:1234'): + url_lib.urlopen('http://some_hopefully_nonexistant_domain:80/') + assert 'http_proxy or https_proxy set in environment' in str(exc.value) + # We need to do this by hand because the exception was raised + # during the entry of the context manager, so the exit handler + # wasn't reached. + del os.environ['http_proxy'] + + def test_https(): with InstalledApp(wsgi_app.simple_app, host=HOST, port=443) as app: url_lib.urlopen('https://some_hopefully_nonexistant_domain:443/')
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 0, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 3 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[testing]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "", "pip_packages": [ "pytest>=2.4", "httplib2", "requests>=2.0.1", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.5", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==22.2.0 certifi==2021.5.30 charset-normalizer==2.0.12 httplib2==0.22.0 idna==3.10 importlib-metadata==4.8.3 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.1.4 pytest==7.0.1 requests==2.27.1 six==1.17.0 tomli==1.2.3 typing_extensions==4.1.1 urllib3==1.26.20 -e git+https://github.com/cdent/wsgi-intercept.git@3048a0921675822f5f27567d63ecca094077dc25#egg=wsgi_intercept zipp==3.6.0
name: wsgi-intercept channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2021.5.30=py36h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.3=he6710b0_2 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=21.2.2=py36h06a4308_0 - python=3.6.13=h12debd9_1 - readline=8.2=h5eee18b_0 - setuptools=58.0.4=py36h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.37.1=pyhd3eb1b0_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==22.2.0 - charset-normalizer==2.0.12 - httplib2==0.22.0 - idna==3.10 - importlib-metadata==4.8.3 - iniconfig==1.1.1 - packaging==21.3 - pluggy==1.0.0 - py==1.11.0 - pyparsing==3.1.4 - pytest==7.0.1 - requests==2.27.1 - six==1.17.0 - tomli==1.2.3 - typing-extensions==4.1.1 - urllib3==1.26.20 - zipp==3.6.0 prefix: /opt/conda/envs/wsgi-intercept
[ "test/test_requests.py::test_proxy_handling", "test/test_urllib.py::test_proxy_handling" ]
[ "test/test_httplib2.py::test_bogus_domain", "test/test_httplib2.py::test_https", "test/test_httplib2.py::test_https_default_port", "test/test_requests.py::test_bogus_domain", "test/test_requests.py::test_https", "test/test_requests.py::test_https_default_port", "test/test_requests.py::test_https_not_intercepted" ]
[ "test/test_http_client.py::test_http", "test/test_http_client.py::test_https", "test/test_http_client.py::test_other", "test/test_http_client.py::test_proxy_handling", "test/test_http_client.py::test_app_error", "test/test_http_client.py::test_http_not_intercepted", "test/test_http_client.py::test_https_not_intercepted", "test/test_httplib2.py::test_http", "test/test_httplib2.py::test_http_default_port", "test/test_httplib2.py::test_http_other_port", "test/test_httplib2.py::test_proxy_handling", "test/test_httplib2.py::test_app_error", "test/test_requests.py::test_http", "test/test_requests.py::test_http_default_port", "test/test_requests.py::test_http_other_port", "test/test_requests.py::test_app_error", "test/test_requests.py::test_http_not_intercepted", "test/test_urllib.py::test_http", "test/test_urllib.py::test_http_default_port", "test/test_urllib.py::test_http_other_port", "test/test_urllib.py::test_https", "test/test_urllib.py::test_https_default_port", "test/test_urllib.py::test_app_error", "test/test_urllib.py::test_http_not_intercepted", "test/test_urllib.py::test_https_not_intercepted" ]
[]
null
392
joke2k__faker-325
326e22d5752e0a28baee59c57ed0f49935de9059
2016-01-22 19:57:09
883576c2d718ad7f604415e02a898f1f917d5b86
diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py index 5f07712d..dea4dd36 100644 --- a/faker/providers/lorem/__init__.py +++ b/faker/providers/lorem/__init__.py @@ -8,7 +8,8 @@ class Provider(BaseProvider): @classmethod def word(cls): """ - :example 'Lorem' + Generate a random word + :example 'lorem' """ return cls.random_element(cls.word_list) diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py index 0b452478..4e5477b8 100644 --- a/faker/providers/python/__init__.py +++ b/faker/providers/python/__init__.py @@ -25,7 +25,7 @@ class Provider(BaseProvider): @classmethod def pystr(cls, max_chars=20): - return Lorem.text(max_chars) + return "".join(cls.random_letter() for i in range(max_chars)) @classmethod def pyfloat(cls, left_digits=None, right_digits=None, positive=False):
Add ability to generate lorem characters without punctuation Sometimes I want to generate a string of characters of a specific length without any punctuation or capitalization but the lorem provider currently does not allow for this.
joke2k/faker
diff --git a/faker/tests/__init__.py b/faker/tests/__init__.py index 8c20e3d5..802bee4c 100644 --- a/faker/tests/__init__.py +++ b/faker/tests/__init__.py @@ -499,6 +499,20 @@ class FactoryTestCase(unittest.TestCase): sentence = provider.sentence(0) self.assertEqual(sentence, '') + def test_random_pystr_characters(self): + from faker.providers.python import Provider + provider = Provider(None) + + characters = provider.pystr() + self.assertEqual(len(characters), 20) + characters = provider.pystr(max_chars=255) + self.assertEqual(len(characters), 255) + characters = provider.pystr(max_chars=0) + self.assertEqual(characters, '') + characters = provider.pystr(max_chars=-10) + self.assertEqual(characters, '') + + def test_us_ssn_valid(self): from faker.providers.ssn.en_US import Provider
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 3 }, "num_modified_files": 2 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup==1.2.2 -e git+https://github.com/joke2k/faker.git@326e22d5752e0a28baee59c57ed0f49935de9059#egg=fake_factory iniconfig==2.1.0 packaging==24.2 pluggy==1.5.0 pytest==8.3.5 python-dateutil==2.9.0.post0 six==1.17.0 tomli==2.2.1
name: faker channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - exceptiongroup==1.2.2 - iniconfig==2.1.0 - packaging==24.2 - pluggy==1.5.0 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - six==1.17.0 - tomli==2.2.1 prefix: /opt/conda/envs/faker
[ "faker/tests/__init__.py::FactoryTestCase::test_random_pystr_characters" ]
[]
[ "faker/tests/__init__.py::ShimsTestCase::test_counter", "faker/tests/__init__.py::UtilsTestCase::test_add_dicts", "faker/tests/__init__.py::UtilsTestCase::test_choice_distribution", "faker/tests/__init__.py::UtilsTestCase::test_find_available_locales", "faker/tests/__init__.py::UtilsTestCase::test_find_available_providers", "faker/tests/__init__.py::FactoryTestCase::test_add_provider_gives_priority_to_newly_added_provider", "faker/tests/__init__.py::FactoryTestCase::test_command", "faker/tests/__init__.py::FactoryTestCase::test_command_custom_provider", "faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates", "faker/tests/__init__.py::FactoryTestCase::test_date_time_between_dates_with_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period", "faker/tests/__init__.py::FactoryTestCase::test_date_time_this_period_with_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_datetime_safe", "faker/tests/__init__.py::FactoryTestCase::test_datetimes_with_and_without_tzinfo", "faker/tests/__init__.py::FactoryTestCase::test_documentor", "faker/tests/__init__.py::FactoryTestCase::test_email", "faker/tests/__init__.py::FactoryTestCase::test_format_calls_formatter_on_provider", "faker/tests/__init__.py::FactoryTestCase::test_format_transfers_arguments_to_formatter", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_callable", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_returns_correct_formatter", "faker/tests/__init__.py::FactoryTestCase::test_get_formatter_throws_exception_on_incorrect_formatter", "faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format", "faker/tests/__init__.py::FactoryTestCase::test_magic_call_calls_format_with_arguments", "faker/tests/__init__.py::FactoryTestCase::test_no_words_paragraph", "faker/tests/__init__.py::FactoryTestCase::test_no_words_sentence", 
"faker/tests/__init__.py::FactoryTestCase::test_parse_returns_same_string_when_it_contains_no_curly_braces", "faker/tests/__init__.py::FactoryTestCase::test_parse_returns_string_with_tokens_replaced_by_formatters", "faker/tests/__init__.py::FactoryTestCase::test_password", "faker/tests/__init__.py::FactoryTestCase::test_prefix_suffix_always_string", "faker/tests/__init__.py::FactoryTestCase::test_random_element", "faker/tests/__init__.py::FactoryTestCase::test_slugify", "faker/tests/__init__.py::FactoryTestCase::test_timezone_conversion", "faker/tests/__init__.py::FactoryTestCase::test_us_ssn_valid", "faker/tests/__init__.py::GeneratorTestCase::test_get_random", "faker/tests/__init__.py::GeneratorTestCase::test_random_seed_doesnt_seed_system_random" ]
[]
MIT License
393
sympy__sympy-10448
425ee8695a04a17b71ea15fa045fa95efffbeb1d
2016-01-23 23:34:05
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
smichr: A strange error in mpmath has been uncovered that I will have to investigate. asmeurer: Ping @hargup and @aktech on solveset issues. smichr: - [ ] check on invert_real and invert_complex's need to use domain (b/c in complex it wasn't used) aktech: Ping me when it's ready to go. smichr: ping @aktech. If you can take a look, that would be appreciated. smichr: @aktech, can you give this a look? smichr: It's been a while since I looked at this and having looked again, I agree with you: I will move the `_first` related material to the end of the helper. smichr: @jksuom, do you have an idea about how to handle this latest issue? I am trying to do "safe simplification" by not allowing simplify to return an expression which has lost a singularity defined by some denominator, e.g. `1/(1/x+1)` is not allowed to simplify to `x/(1 + x)`. But then this disallows `factorial(x)/gamma(x)` from simplifying to 1. I wonder if I should only disallow denominators which are **Add** instances to not disappear. smichr: If there are no more comments, @aktech , I will commit this in 12h. aktech: @smichr I am sorry, for delaying this PR. Actually I am running short of time this week and next week so can't take a thorough look write now, so I would wait for @hargup to give a thumps up (he also probably have his exams write now). Sorry for the inconvenience. jksuom: > 1/(1/x+1) is not allowed to simplify to x/(1 + x). I'm afraid that trying to achieve this could result in problems hard to handle. In fact, there are reasons why I would rather allow this simplification. BTW, you probably meant that ``factorial(x)/gamma(x)`` should simplify to ``x`` (for ``x`` a positive integer). smichr: @jksuom , I added the comment to the wrong PR -- I'll move it to the other one regarding Relational simplification. hargup: @smichr Thanks for this PR it greatly simplifies things. I have added my comments and after they are addressed it should be good to go. 
smichr: Let's see if tests pass after the recent edit and if there are no final comments, I will commit this. Thanks for looking it over, @hargup.
diff --git a/sympy/combinatorics/perm_groups.py b/sympy/combinatorics/perm_groups.py index 37f07c0bc0..b1565fd52f 100644 --- a/sympy/combinatorics/perm_groups.py +++ b/sympy/combinatorics/perm_groups.py @@ -1306,14 +1306,14 @@ def generate_schreier_sims(self, af=False): yield x._array_form else: yield x - return + raise StopIteration if len(u) == 1: for i in basic_orbits[0]: if af: yield u[0][i]._array_form else: yield u[0][i] - return + raise StopIteration u = list(reversed(u)) basic_orbits = basic_orbits[::-1] @@ -1327,7 +1327,7 @@ def generate_schreier_sims(self, af=False): # backtrack when finished iterating over coset if pos[h] >= posmax[h]: if h == 0: - return + raise StopIteration pos[h] = 0 h -= 1 stg.pop() diff --git a/sympy/core/expr.py b/sympy/core/expr.py index 32954b2e7a..55ed6db91e 100644 --- a/sympy/core/expr.py +++ b/sympy/core/expr.py @@ -2622,7 +2622,7 @@ def _eval_lseries(self, x, logx=None): yield series.removeO() else: yield series - return + raise StopIteration while series.is_Order: n += 1 diff --git a/sympy/polys/subresultants_qq_zz.py b/sympy/polys/subresultants_qq_zz.py index a8bc77be1c..1d71909588 100644 --- a/sympy/polys/subresultants_qq_zz.py +++ b/sympy/polys/subresultants_qq_zz.py @@ -1,214 +1,13 @@ # -*- coding: utf-8 -*- """ -This module contains functions for the computation -of Euclidean, generalized Sturmian and (modified) subresultant -polynomial remainder sequences (prs's). - -The pseudo-remainder function prem() of sympy is _not_ used -by any of the functions in the module. - -Instead of prem() we use the function - -rem_z(). - -Included is also the function quo_z(). - -1. Theoretical background: -========================== -Consider the polynomials f, g ∈ Z[x] of degrees deg(f) = n and -deg(g) = m with n ≥ m. - -Definition 1: -============= -The sign sequence of a polynomial remainder sequence (prs) is the -sequence of signs of the leading coefficients of its polynomials. 
- -Sign sequences can be computed with the function: - -sign_seq(poly_seq, x) - -Definition 2: -============= -A polynomial remainder sequence (prs) is called complete if the -degree difference between any two consecutive polynomials is 1; -otherwise, it called incomplete. - -It is understood that f, g belong to the sequences mentioned in -the two definitions. - -1A. Euclidean and subresultant prs's: -===================================== -The subresultant prs of f, g is a sequence of polynomials in Z[x] -analogous to the Euclidean prs, the sequence obtained by applying -on f, g Euclid’s algorithm for polynomial greatest common divisors -(gcd) in Q[x]. - -The subresultant prs differs from the Euclidean prs in that the -coefficients of each polynomial in the former sequence are determinants ---- also referred to as subresultants --- of appropriately selected -sub-matrices of sylvester1(f, g, x), Sylvester’s matrix of 1840 of -dimensions (n + m) × (n + m). - -Recall that the determinant of sylvester1(f, g, x) itself is -called the resultant of f, g and serves as a criterion of whether -the two polynomials have common roots or not. - -For complete prs’s the sign sequence of the Euclidean prs of f, g -is identical to the sign sequence of the subresultant prs of f, g -and the coefficients of one sequence are easily computed from the -coefficients of the other. - -For incomplete prs’s the polynomials in the subresultant prs, generally -differ in sign from those of the Euclidean prs, and --- unlike the -case of complete prs’s --- it is not at all obvious how to compute -the coefficients of one sequence from the coefficients of the other. - -1B. Sturmian and modified subresultant prs's: -============================================= -For the same polynomials f, g ∈ Z[x] mentioned above, their ``modified'' -subresultant prs is a sequence of polynomials similar to the Sturmian -prs, the sequence obtained by applying in Q[x] Sturm’s algorithm on f, g. 
- -The two sequences differ in that the coefficients of each polynomial -in the modified subresultant prs are the determinants --- also referred -to as modified subresultants --- of appropriately selected sub-matrices -of sylvester2(f, g, x), Sylvester’s matrix of 1853 of dimensions 2n × 2n. - -The determinant of sylvester2 itself is called the modified resultant -of f, g and it also can serve as a criterion of whether the two -polynomials have common roots or not. - -For complete prs’s the sign sequence of the Sturmian prs of f, g is -identical to the sign sequence of the modified subresultant prs of -f, g and the coefficients of one sequence are easily computed from -the coefficients of the other. - -For incomplete prs’s the polynomials in the modified subresultant prs, -generally differ in sign from those of the Sturmian prs, and --- unlike -the case of complete prs’s --- it is not at all obvious how to compute -the coefficients of one sequence from the coefficients of the other. - -As Sylvester pointed out, the coefficients of the polynomial remainders -obtained as (modified) subresultants are the smallest possible without -introducing rationals and without computing (integer) greatest common -divisors. - -1C. On terminology: -=================== -Whence the terminology? Well generalized Sturmian prs's are -``modifications'' of Euclidean prs's; the hint came from the title -of the Pell-Gordon paper of 1917. - -In the literature one also encounters the name ``non signed'' and -``signed'' prs for Euclidean and Sturmian prs respectively. - -Likewise ``non signed'' and ``signed'' subresultant prs for -subresultant and modified subresultant prs respectively. - -2. Functions in the module: -=========================== -No function utilizes sympy's function prem(). - -2A. Matrices: -============= -The functions sylvester(f, g, x, method=1) and -sylvester(f, g, x, method=2) compute either Sylvester matrix. 
-They can be used to compute (modified) subresultant prs's by -direct determinant evaluation. - -The function bezout(f, g, x, method='prs') provides a matrix of -smaller dimensions than either Sylvester matrix. It is the function -of choice for computing (modified) subresultant prs's by direct -determinant evaluation. - -sylvester(f, g, x, method=1) -sylvester(f, g, x, method=2) -bezout(f, g, x, method='prs') - -The following identity holds: - -bezout(f, g, x, method='prs') = -backward_eye(deg(f))*bezout(f, g, x, method='bz')*backward_eye(deg(f)) - -2B. Subresultant and modified subresultant prs's by -=================================================== -determinant evaluation: -======================= -Instead of utilizing the Sylvester matrices, we employ -the Bezout matrix of smaller dimensions. - -subresultants_bezout(f, g, x) -modified_subresultants_bezout(f, g, x) - -2C. Subresultant prs's by ONE determinant evaluation: -===================================================== -All three functions in this section evaluate one determinant -per remainder polynomial; this is the determinant of an -appropriately selected sub-matrix of sylvester1(f, g, x), -Sylvester’s matrix of 1840. - -To compute the remainder polynomials the function -subresultants_rem(f, g, x) employs rem(f, g, x). -By contrast, the other two functions implement Van Vleck’s ideas -of 1900 and compute the remainder polynomials by trinagularizing -sylvester2(f, g, x), Sylvester’s matrix of 1853. - - -subresultants_rem(f, g, x) -subresultants_vv(f, g, x) -subresultants_vv_2(f, g, x). - -2E. Euclidean, Sturmian prs's in Q[x]: -====================================== -euclid_q(f, g, x) -sturm_q(f, g, x) - -2F. Euclidean, Sturmian and (modified) subresultant prs's P-G: -============================================================== -All functions in this section are based on the Pell-Gordon (P-G) -theorem of 1917. 
-Computations are done in Q[x], employing the function rem(f, g, x) -for the computation of the remainder polynomials. - -euclid_pg(f, g, x) -sturm pg(f, g, x) -subresultants_pg(f, g, x) -modified_subresultants_pg(f, g, x) - -2G. Euclidean, Sturmian and (modified) subresultant prs's A-M-V: -================================================================ -All functions in this section are based on the Akritas-Malaschonok- -Vigklas (A-M-V) theorem of 2015. -Computations are done in Z[x], employing the function rem_z(f, g, x) -for the computation of the remainder polynomials. - -euclid_amv(f, g, x) -sturm_amv(f, g, x) -subresultants_amv(f, g, x) -modified_subresultants_amv(f, g, x) - -2Ga. Exception: -=============== -subresultants_amv_q(f, g, x) - -This function employs rem(f, g, x) for the computation of -the remainder polynomials, despite the fact that it implements -the A-M-V Theorem. - -It is included in our module in order to show that theorems P-G -and A-M-V can be implemented utilizing either the function -rem(f, g, x) or the function rem_z(f, g, x). - -For clearly historical reasons --- since the Collins-Brown-Traub -coefficients-reduction factor β_i was not available in 1917 --- -we have implemented the Pell-Gordon theorem with the function -rem(f, g, x) and the A-M-V Theorem with the function rem_z(f, g, x). -""" +Created on Mon Dec 28 13:25:02 2015 +@author: alkis +""" from __future__ import print_function, division -from sympy import (Abs, degree, expand, eye, floor, LC, Matrix, nan, Poly, pprint) +from sympy import (Abs, degree, expand, floor, LC, Matrix, nan, Poly, pprint) from sympy import (QQ, quo, rem, S, sign, simplify, summation, var, zeros) def sylvester(f, g, x, method = 1): @@ -324,251 +123,6 @@ def sign_seq(poly_seq, x): """ return [sign(LC(poly_seq[i], x)) for i in range(len(poly_seq))] -def bezout(p, q, x, method='bz'): - """ - The input polynomials p, q are in Z[x] or in Q[x]. It is assumed - that degree(p, x) >= degree(q, x). 
- - The default option bezout(p, q, x, method='bz') returns Bezout's - symmetric matrix of p and q, of dimensions deg(p) x deg(p). The - determinant of this matrix is equal to the determinant of sylvester2, - Sylvester's matrix of 1853, whose dimensions are 2*deg(p) x 2*deg(p); - however, the subresultants of these two matrices may vary in sign. - - The other option, bezout(p, q, x, 'prs'), is of interest to us - in this module because it returns a matrix equivalent to sylvester2. - In this case all subresultants of the two matrices are identical. - - Both the subresultant polynomial remainder sequence (prs) and - the modified subresultant prs of p and q can be computed by - evaluating determinants of appropriately selected submatrices of - bezout(p, q, x, 'prs') --- one determinant per coefficient of the - remainder polynomials. - - The matrices bezout(p, q, x, 'bz') and bezout(p, q, x, 'prs') - are related by the formula - - bezout(p, q, x, 'prs') = - backward_eye(deg(p)) * bezout(p, q, x, 'bz') * backward_eye(deg(p)), - - where backward_eye() is the backward identity function. - - References: - =========== - 1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants - and Their Applications. Appl. Algebra in Engin., Communic. and Comp., - Vol. 15, 233–266, 2004. - - """ - y = var('y') - degP = degree(p, x) - - # expr is 0 when x = y - expr = p * q.subs({x:y}) - p.subs({x:y}) * q - - # hence expr is exactly divisible by x - y - poly = Poly( quo(expr, x-y), x, y) - - # form Bezout matrix and store them in B as indicated to get - # the LC coefficient of each poly in the first position of each row - B = zeros(degP) - for i in range(degP): - for j in range(degP): - if method == 'prs': - B[degP - 1 - i, degP - 1 - j] = poly.nth(i, j) - else: - B[i, j] = poly.nth(i, j) - return B - -def backward_eye(n): - ''' - Returns the backward identity matrix of dimensions n x n. 
- - Needed to "turn" the Bezout matrices - so that the leading coefficients are first. - See docstring of the function bezout(p, q, x, method='bz'). - ''' - M = eye(n) # identity matrix of order n - - for i in range(int(M.rows / 2)): - M.row_swap(0 + i, M.rows - 1 - i) - - return M - -def process_bezout_output(poly_seq, x): - """ - poly_seq is a polynomial remainder sequence computed either by - subresultants_bezout or by modified_subresultants_bezout. - - This function removes from poly_seq all zero polynomials as well - as all those whose degree is equal to the degree of a previous - polynomial in poly_seq, as we scan it from left to right. - - """ - L = poly_seq[:] # get a copy of the input sequence - d = degree(L[1], x) - i = 2 - while i < len(L): - d_i = degree(L[i], x) - if d_i < 0: # zero poly - L.remove(L[i]) - i = i - 1 - if d == d_i: # poly degree equals degree of previous poly - L.remove(L[i]) - i = i - 1 - if d_i >= 0: - d = d_i - i = i + 1 - - return L - -def subresultants_bezout(p, q, x): - """ - The input polynomials p, q are in Z[x] or in Q[x]. It is assumed - that degree(p, x) >= degree(q, x). - - Computes the subresultant polynomial remainder sequence - of p, q by evaluating determinants of appropriately selected - submatrices of bezout(p, q, x, 'prs'). The dimensions of the - latter are deg(p) x deg(p). - - Each coefficient is computed by evaluating the determinant of the - corresponding submatrix of bezout(p, q, x, 'prs'). - - bezout(p, q, x, 'prs) is used instead of sylvester(p, q, x, 1), - Sylvester's matrix of 1840, because the dimensions of the latter - are (deg(p) + deg(q)) x (deg(p) + deg(q)). - - If the subresultant prs is complete, then the output coincides - with the Euclidean sequence of the polynomials p, q. - - References: - =========== - 1. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants - and Their Applications. Appl. Algebra in Engin., Communic. and Comp., - Vol. 15, 233–266, 2004. 
- - """ - # make sure neither p nor q is 0 - if p == 0 or q == 0: - return [p, q] - - f, g = p, q - n = degF = degree(f, x) - m = degG = degree(g, x) - - # make sure proper degrees - if n == 0 and m == 0: - return [f, g] - if n < m: - n, m, degF, degG, f, g = m, n, degG, degF, g, f - if n > 0 and m == 0: - return [f, g] - - SR_L = [f, g] # subresultant list - F = LC(f, x)**(degF - degG) - - # form the bezout matrix - B = bezout(f, g, x, 'prs') - - # pick appropriate submatrices of B - # and form subresultant polys - if degF > degG: - j = 2 - if degF == degG: - j = 1 - while j <= degF: - M = B[0:j, :] - k, coeff_L = j - 1, [] - while k <= degF - 1: - coeff_L.append(M[: ,0 : j].det()) - if k < degF - 1: - M.col_swap(j - 1, k + 1) - k = k + 1 - - # apply Theorem 2.1 in the paper by Toca & Vega 2004 - # to get correct signs - SR_L.append((int((-1)**(j*(j-1)/2)) * Poly(coeff_L, x) / F).as_expr()) - j = j + 1 - - return process_bezout_output(SR_L, x) - -def modified_subresultants_bezout(p, q, x): - """ - The input polynomials p, q are in Z[x] or in Q[x]. It is assumed - that degree(p, x) >= degree(q, x). - - Computes the modified subresultant polynomial remainder sequence - of p, q by evaluating determinants of appropriately selected - submatrices of bezout(p, q, x, 'prs'). The dimensions of the - latter are deg(p) x deg(p). - - Each coefficient is computed by evaluating the determinant of the - corresponding submatrix of bezout(p, q, x, 'prs'). - - bezout(p, q, x, 'prs') is used instead of sylvester(p, q, x, 2), - Sylvester's matrix of 1853, because the dimensions of the latter - are 2*deg(p) x 2*deg(p). - - If the modified subresultant prs is complete, and LC( p ) > 0, the output - coincides with the (generalized) Sturm's sequence of the polynomials p, q. - - References: - =========== - 1. Akritas, A. G., G.I. Malaschonok and P.S. Vigklas: ``Sturm Sequences - and Modified Subresultant Polynomial Remainder Sequences.'' - Serdica Journal of Computing, Vol. 
8, No 1, 29–46, 2014. - - 2. G.M.Diaz-Toca,L.Gonzalez-Vega: Various New Expressions for Subresultants - and Their Applications. Appl. Algebra in Engin., Communic. and Comp., - Vol. 15, 233–266, 2004. - - - """ - # make sure neither p nor q is 0 - if p == 0 or q == 0: - return [p, q] - - f, g = p, q - n = degF = degree(f, x) - m = degG = degree(g, x) - - # make sure proper degrees - if n == 0 and m == 0: - return [f, g] - if n < m: - n, m, degF, degG, f, g = m, n, degG, degF, g, f - if n > 0 and m == 0: - return [f, g] - - SR_L = [f, g] # subresultant list - - # form the bezout matrix - B = bezout(f, g, x, 'prs') - - # pick appropriate submatrices of B - # and form subresultant polys - if degF > degG: - j = 2 - if degF == degG: - j = 1 - while j <= degF: - M = B[0:j, :] - k, coeff_L = j - 1, [] - while k <= degF - 1: - coeff_L.append(M[: ,0 : j].det()) - if k < degF - 1: - M.col_swap(j - 1, k + 1) - k = k + 1 - - ## Theorem 2.1 in the paper by Toca & Vega 2004 is _not needed_ - ## in this case since - ## the bezout matrix is equivalent to sylvester2 - SR_L.append(( Poly(coeff_L, x)).as_expr()) - j = j + 1 - - return process_bezout_output(SR_L, x) - def sturm_pg(p, q, x, method=0): """ p, q are polynomials in Z[x] or Q[x]. 
diff --git a/sympy/solvers/ode.py b/sympy/solvers/ode.py index aa5794e9e2..492b6b6f9a 100644 --- a/sympy/solvers/ode.py +++ b/sympy/solvers/ode.py @@ -1784,9 +1784,11 @@ def check_type(x, y): return 'type3' r1 = eq[0].match(diff(x(t),t) - f) r2 = eq[1].match(diff(y(t),t) - g) - num, denum = ((r1[f].subs(x(t),u).subs(y(t),v))/(r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom() + num, den = ( + (r1[f].subs(x(t),u).subs(y(t),v))/ + (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom() R1 = num.match(f1*g1) - R2 = denum.match(f2*g2) + R2 = den.match(f2*g2) phi = (r1[f].subs(x(t),u).subs(y(t),v))/num if R1 and R2: return 'type4' @@ -3558,8 +3560,8 @@ def ode_2nd_power_series_ordinary(eq, func, order, match): seriesdict = {} recurr = Function("r") - # Generating the recurrence relation which works this way - # a] For the second order term the summation begins at n = 2. The coefficients + # Generating the recurrence relation which works this way: + # for the second order term the summation begins at n = 2. The coefficients # p is multiplied with an*(n - 1)*(n - 2)*x**n-2 and a substitution is made such that # the exponent of x becomes n. # For example, if p is x, then the second degree recurrence term is @@ -3635,8 +3637,7 @@ def ode_2nd_power_series_ordinary(eq, func, order, match): # Checking how many values are already present tcounter = len([t for t in finaldict.values() if t]) - for count in range(tcounter, terms - 3): # Assuming c0 and c1 to be arbitrary - #while tcounter < terms - 2: # Assuming c0 and c1 to be arbitrary + for _ in range(tcounter, terms - 3): # Assuming c0 and c1 to be arbitrary check = rhs.subs(n, startiter) nlhs = lhs.subs(n, startiter) nrhs = check.subs(finaldict) @@ -4265,7 +4266,7 @@ def abc(eq): if eq is a*x + b*f(x) + c, else None. 
''' eq = _mexpand(eq) - c = eq.as_independent(x, f(x), as_Add = True)[0] + c = eq.as_independent(x, f(x), as_Add=True)[0] if not c.is_Rational: return a = eq.coeff(x) @@ -6800,10 +6801,10 @@ def sysode_linear_2eq_order2(match_): r = dict() t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0] for i in range(2): - eqs = 0 + eqs = [] for terms in Add.make_args(eq[i]): - eqs += terms/fc[i,func[i],2] - eq[i] = eqs + eqs.append(terms/fc[i,func[i],2]) + eq[i] = Add(*eqs) # for equations Eq(diff(x(t),t,t), a1*diff(x(t),t)+b1*diff(y(t),t)+c1*x(t)+d1*y(t)+e1) # and Eq(a2*diff(y(t),t,t), a2*diff(x(t),t)+b2*diff(y(t),t)+c2*x(t)+d2*y(t)+e2) r['a1'] = -fc[0,x(t),1]/fc[0,x(t),2] ; r['a2'] = -fc[1,x(t),1]/fc[1,y(t),2] @@ -7184,14 +7185,16 @@ def _linear_2eq_order2_type6(x, y, t, r, eq): C1, C2, C3, C4 = get_numbered_constants(eq, num=4) k = Symbol('k') z = Function('z') - num, denum = cancel((r['c1']*x(t) + r['d1']*y(t))/(r['c2']*x(t) + r['d2']*y(t))).as_numer_denom() + num, den = cancel( + (r['c1']*x(t) + r['d1']*y(t))/ + (r['c2']*x(t) + r['d2']*y(t))).as_numer_denom() f = r['c1']/num.coeff(x(t)) a1 = num.coeff(x(t)) b1 = num.coeff(y(t)) - a2 = denum.coeff(x(t)) - b2 = denum.coeff(y(t)) + a2 = den.coeff(x(t)) + b2 = den.coeff(y(t)) chareq = k**2 - (a1 + b2)*k + a1*b2 - a2*b1 - [k1, k2] = [rootof(chareq, k) for k in range(Poly(chareq).degree())] + k1, k2 = [rootof(chareq, k) for k in range(Poly(chareq).degree())] z1 = dsolve(diff(z(t),t,t) - k1*f*z(t)).rhs z2 = dsolve(diff(z(t),t,t) - k2*f*z(t)).rhs sol1 = (k1*z2 - k2*z1 + a1*(z1 - z2))/(a2*(k1-k2)) @@ -7229,12 +7232,14 @@ def _linear_2eq_order2_type7(x, y, t, r, eq): """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) k = Symbol('k') - num, denum = cancel((r['a1']*x(t) + r['b1']*y(t))/(r['a2']*x(t) + r['b2']*y(t))).as_numer_denom() + num, den = cancel( + (r['a1']*x(t) + r['b1']*y(t))/ + (r['a2']*x(t) + r['b2']*y(t))).as_numer_denom() f = r['a1']/num.coeff(x(t)) a1 = num.coeff(x(t)) b1 = num.coeff(y(t)) - a2 = 
denum.coeff(x(t)) - b2 = denum.coeff(y(t)) + a2 = den.coeff(x(t)) + b2 = den.coeff(y(t)) chareq = k**2 - (a1 + b2)*k + a1*b2 - a2*b1 [k1, k2] = [rootof(chareq, k) for k in range(Poly(chareq).degree())] F = Integral(f, t) @@ -7283,10 +7288,10 @@ def _linear_2eq_order2_type8(x, y, t, r, eq): """ C1, C2, C3, C4 = get_numbered_constants(eq, num=4) - num, denum = cancel(r['d1']/r['c2']).as_numer_denom() + num, den = cancel(r['d1']/r['c2']).as_numer_denom() f = -r['d1']/num a = num - b = denum + b = den mul = sqrt(abs(a*b)) Igral = Integral(t*f, t) if a*b > 0: @@ -7386,8 +7391,8 @@ def _linear_2eq_order2_type10(x, y, t, r, eq): q = Wild('q', exclude=[t, t**2]) s = Wild('s', exclude=[t, t**2]) n = Wild('n', exclude=[t, t**2]) - num, denum = r['c1'].as_numer_denom() - dic = denum.match((n*(p*t**2+q*t+s)**2).expand()) + num, den = r['c1'].as_numer_denom() + dic = den.match((n*(p*t**2+q*t+s)**2).expand()) eqz = dic[p]*t**2 + dic[q]*t + dic[s] a = num/dic[n] b = cancel(r['d1']*eqz**2) @@ -7938,9 +7943,11 @@ def _nonlinear_2eq_order1_type4(x, y, t, eq): g2 = Wild('g2', exclude=[u,t]) r1 = eq[0].match(diff(x(t),t) - f) r2 = eq[1].match(diff(y(t),t) - g) - num, denum = ((r1[f].subs(x(t),u).subs(y(t),v))/(r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom() + num, den = ( + (r1[f].subs(x(t),u).subs(y(t),v))/ + (r2[g].subs(x(t),u).subs(y(t),v))).as_numer_denom() R1 = num.match(f1*g1) - R2 = denum.match(f2*g2) + R2 = den.match(f2*g2) phi = (r1[f].subs(x(t),u).subs(y(t),v))/num F1 = R1[f1]; F2 = R2[f2] G1 = R1[g1]; G2 = R2[g2] diff --git a/sympy/solvers/solveset.py b/sympy/solvers/solveset.py index 999d57cbda..316911ef64 100644 --- a/sympy/solvers/solveset.py +++ b/sympy/solvers/solveset.py @@ -12,7 +12,7 @@ from sympy.core.numbers import I, Number, Rational, oo from sympy.core.function import (Lambda, expand, expand_complex) from sympy.core.relational import Eq -from sympy.simplify.simplify import simplify, fraction, trigsimp +from sympy.simplify.simplify import simplify, fraction, 
trigsimp, signsimp from sympy.core.symbol import Symbol from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp, acos, asin, atan, acsc, asec, arg, @@ -25,95 +25,125 @@ from sympy.matrices import Matrix from sympy.polys import (roots, Poly, degree, together, PolynomialError, RootOf) -from sympy.solvers.solvers import checksol, denoms +from sympy.solvers.solvers import checksol, denoms, unrad from sympy.solvers.inequalities import solve_univariate_inequality from sympy.utilities import filldedent import warnings -def invert_real(f_x, y, x): - """ Inverts a real valued function - - Reduces the real valued equation ``f(x) = y`` to a set of equations ``{g(x) - = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is a simpler - function than ``f(x)``. The return value is a tuple ``(g(x), set_h)``, - where ``g(x)`` is a function of ``x`` and ``set_h`` is the set of - functions ``{h_1(y), h_2(y), ..., h_n(y)}``. +def _invert(f_x, y, x, domain=S.Complexes): + """ + Reduce the complex valued equation ``f(x) = y`` to a set of equations + ``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is + a simpler function than ``f(x)``. The return value is a tuple ``(g(x), + set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is + the set of function ``{h_1(y), h_2(y), ..., h_n(y)}``. Here, ``y`` is not necessarily a symbol. - The ``set_h`` contains the functions along with the information about their - domain in which they are valid, through set operations. For instance, if - ``y = Abs(x) - n``, is inverted, then, the ``set_h`` doesn't simply - return `{-n, n}`, as it doesn't explicitly mentions about the nature of - `n` rather it will return: + The ``set_h`` contains the functions along with the information + about their domain in which they are valid, through set + operations. 
For instance, if ``y = Abs(x) - n``, is inverted + in the real domain, then, the ``set_h`` doesn't simply return + `{-n, n}`, as the nature of `n` is unknown; rather it will return: `Intersection([0, oo) {n}) U Intersection((-oo, 0], {-n})` + By default, the complex domain is used but note that inverting even + seemingly simple functions like ``exp(x)`` can give very different + results in the complex domain than are obtained in the real domain. + (In the case of ``exp(x)``, the inversion via ``log`` is multi-valued + in the complex domain, having infinitely many branches.) + + If you are working with real values only (or you are not sure which + function to use) you should probably set the domain to + ``S.Reals`` (or use `invert\_real` which does that automatically). + Examples ======== - >>> from sympy.solvers.solveset import invert_real - >>> from sympy import tan, Abs, exp - >>> from sympy.abc import x, y, n - >>> invert_real(exp(x), 1, x) - (x, {0}) - >>> invert_real(tan(x), y, x) - (x, ImageSet(Lambda(_n, _n*pi + atan(y)), Integers())) + >>> from sympy.solvers.solveset import invert_complex, invert_real + >>> from sympy.abc import x, y + >>> from sympy import exp, log + When does exp(x) == y? - * ``set_h`` containing information about the domain + >>> invert_complex(exp(x), y, x) + (x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers())) + >>> invert_real(exp(x), y, x) + (x, Intersection((-oo, oo), {log(y)})) - >>> invert_real(Abs(x**31 + x), y, x) - (x**31 + x, Intersection([0, oo), {y}) U Intersection((-oo, 0], {-y})) - >>> invert_real(exp(Abs(x)), y, x) - (x, Intersection([0, oo), {log(y)}) U Intersection((-oo, 0], {-log(y)})) + When does exp(x) == 1?
+ + >>> invert_complex(exp(x), 1, x) + (x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers())) + >>> invert_real(exp(x), 1, x) + (x, {0}) See Also ======== - invert_complex + invert_real, invert_complex """ + x = sympify(x) + if not x.is_Symbol: + raise ValueError("x must be a symbol") + f_x = sympify(f_x) + if not f_x.has(x): + raise ValueError("Inverse of constant function doesn't exist") y = sympify(y) - if not y.has(x): - return _invert_real(f_x, FiniteSet(y), x) + if y.has(x): + raise ValueError("y should be independent of x ") + + if domain.is_subset(S.Reals): + x, s = _invert_real(f_x, FiniteSet(y), x) else: - raise ValueError(" y should be independent of x ") + x, s = _invert_complex(f_x, FiniteSet(y), x) + return x, s.intersection(domain) if isinstance(s, FiniteSet) else s -def _invert_real(f, g_ys, symbol): - """ Helper function for invert_real """ +invert_complex = _invert + + +def invert_real(f_x, y, x, domain=S.Reals): + return _invert(f_x, y, x, domain) - if not f.has(symbol): - raise ValueError("Inverse of constant function doesn't exist") - if f is symbol: +def _invert_real(f, g_ys, symbol): + """Helper function for _invert.""" + + if f == symbol: return (f, g_ys) - n = Dummy('n') - if hasattr(f, 'inverse') and not isinstance(f, TrigonometricFunction) and \ - not isinstance(f, HyperbolicFunction): + n = Dummy('n', real=True) + + if hasattr(f, 'inverse') and not isinstance(f, ( + TrigonometricFunction, + HyperbolicFunction, + )): if len(f.args) > 1: raise ValueError("Only functions with one argument are supported.") return _invert_real(f.args[0], - imageset(Lambda(n, f.inverse()(n)), g_ys), symbol) + imageset(Lambda(n, f.inverse()(n)), g_ys), + symbol) if isinstance(f, Abs): + pos = Interval(0, S.Infinity) + neg = Interval(S.NegativeInfinity, 0) return _invert_real(f.args[0], - Union(imageset(Lambda(n, n), g_ys).intersect(Interval(0, oo)), - imageset(Lambda(n, -n), g_ys).intersect(Interval(-oo, 0))), - symbol) + Union(imageset(Lambda(n, n), 
g_ys).intersect(pos), + imageset(Lambda(n, -n), g_ys).intersect(neg)), symbol) if f.is_Add: # f = g + h g, h = f.as_independent(symbol) - if g != S.Zero: + if g is not S.Zero: return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol) if f.is_Mul: # f = g*h g, h = f.as_independent(symbol) - if g != S.One: + if g is not S.One: return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol) if f.is_Pow: @@ -141,116 +171,51 @@ def _invert_real(f, g_ys, symbol): return _invert_real(base, res, symbol) if not base_has_sym: - return _invert_real(expo, imageset(Lambda(n, log(n)/log(base)), - g_ys), symbol) - - if isinstance(f, sin): - n = Dummy('n') - if isinstance(g_ys, FiniteSet): - sin_invs = Union(*[imageset(Lambda(n, n*pi + (-1)**n*asin(g_y)), \ - S.Integers) for g_y in g_ys]) - return _invert_real(f.args[0], sin_invs, symbol) + return _invert_real(expo, + imageset(Lambda(n, log(n)/log(base)), g_ys), symbol) - if isinstance(f, csc): - n = Dummy('n') + if isinstance(f, TrigonometricFunction): if isinstance(g_ys, FiniteSet): - csc_invs = Union(*[imageset(Lambda(n, n*pi + (-1)**n*acsc(g_y)), \ - S.Integers) for g_y in g_ys]) - return _invert_real(f.args[0], csc_invs, symbol) - - if isinstance(f, cos): - n = Dummy('n') - if isinstance(g_ys, FiniteSet): - cos_invs_f1 = Union(*[imageset(Lambda(n, 2*n*pi + acos(g_y)), \ - S.Integers) for g_y in g_ys]) - cos_invs_f2 = Union(*[imageset(Lambda(n, 2*n*pi - acos(g_y)), \ - S.Integers) for g_y in g_ys]) - cos_invs = Union(cos_invs_f1, cos_invs_f2) - return _invert_real(f.args[0], cos_invs, symbol) - - if isinstance(f, sec): - n = Dummy('n') - if isinstance(g_ys, FiniteSet): - sec_invs_f1 = Union(*[imageset(Lambda(n, 2*n*pi + asec(g_y)), \ - S.Integers) for g_y in g_ys]) - sec_invs_f2 = Union(*[imageset(Lambda(n, 2*n*pi - asec(g_y)), \ - S.Integers) for g_y in g_ys]) - sec_invs = Union(sec_invs_f1, sec_invs_f2) - return _invert_real(f.args[0], sec_invs, symbol) - - if isinstance(f, tan) or isinstance(f, cot): - n = Dummy('n') 
- if isinstance(g_ys, FiniteSet): - tan_cot_invs = Union(*[imageset(Lambda(n, n*pi + f.inverse()(g_y)), \ - S.Integers) for g_y in g_ys]) - return _invert_real(f.args[0], tan_cot_invs, symbol) + def inv(trig): + if isinstance(f, (sin, csc)): + F = asin if isinstance(f, sin) else acsc + return (lambda a: n*pi + (-1)**n*F(a),) + if isinstance(f, (cos, sec)): + F = acos if isinstance(f, cos) else asec + return ( + lambda a: 2*n*pi + F(a), + lambda a: 2*n*pi - F(a),) + if isinstance(f, (tan, cot)): + return (lambda a: n*pi + f.inverse()(a),) + + n = Dummy('n', integer=True) + invs = S.EmptySet + for L in inv(f): + invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys]) + return _invert_real(f.args[0], invs, symbol) return (f, g_ys) -def invert_complex(f_x, y, x): - """ Inverts a complex valued function. - - Reduces the complex valued equation ``f(x) = y`` to a set of equations - ``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is - a simpler function than ``f(x)``. The return value is a tuple ``(g(x), - set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is - the set of function ``{h_1(y), h_2(y), ..., h_n(y)}``. - Here, ``y`` is not necessarily a symbol. - - Note that `invert\_complex` and `invert\_real` don't always produce the - same result even for a seemingly simple function like ``exp(x)`` because - the complex extension of real valued ``log`` is multivariate in the complex - system and has infinitely many branches. If you are working with real - values only or you are not sure with function to use you should use - `invert\_real`. 
- - - Examples - ======== - - >>> from sympy.solvers.solveset import invert_complex - >>> from sympy.abc import x, y - >>> from sympy import exp, log - >>> invert_complex(log(x), y, x) - (x, {exp(y)}) - >>> invert_complex(log(x), 0, x) # Second parameter is not a symbol - (x, {1}) - >>> invert_complex(exp(x), y, x) - (x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers())) - - See Also - ======== - invert_real - """ - y = sympify(y) - if not y.has(x): - return _invert_complex(f_x, FiniteSet(y), x) - else: - raise ValueError(" y should be independent of x ") - - def _invert_complex(f, g_ys, symbol): - """ Helper function for invert_complex """ + """Helper function for _invert.""" - if not f.has(symbol): - raise ValueError("Inverse of constant function doesn't exist") - - if f is symbol: + if f == symbol: return (f, g_ys) n = Dummy('n') + if f.is_Add: # f = g + h g, h = f.as_independent(symbol) - if g != S.Zero: + if g is not S.Zero: return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol) if f.is_Mul: # f = g*h g, h = f.as_independent(symbol) - if g != S.One: + if g is not S.One: return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol) if hasattr(f, 'inverse') and \ @@ -267,6 +232,7 @@ def _invert_complex(f, g_ys, symbol): log(Abs(g_y))), S.Integers) for g_y in g_ys if g_y != 0]) return _invert_complex(f.args[0], exp_invs, symbol) + return (f, g_ys) @@ -317,13 +283,23 @@ def _domain_check(f, symbol, p): for g in f.args]) -def _is_finite_with_finite_vars(f): +def _is_finite_with_finite_vars(f, domain=S.Complexes): """ - Return True if the given expression is finite when all free symbols - (that are not already specified as finite) are made finite. + Return True if the given expression is finite. For symbols that + don't assign a value for `complex` and/or `real`, the domain will + be used to assign a value; symbols that don't assign a value + for `finite` will be made finite. All other assumptions are + left unmodified. 
""" - reps = dict([(s, Dummy(s.name, finite=True, **s.assumptions0)) - for s in f.free_symbols if s.is_finite is None]) + def assumptions(s): + A = s.assumptions0 + if A.get('finite', None) is None: + A['finite'] = True + A.setdefault('complex', True) + A.setdefault('real', domain.is_subset(S.Reals)) + return A + + reps = dict([(s, Dummy(**assumptions(s))) for s in f.free_symbols]) return f.xreplace(reps).is_finite @@ -377,161 +353,15 @@ def _is_function_class_equation(func_class, f, symbol): return False -def solveset_real(f, symbol): - """ Solves a real valued equation. - - Parameters - ========== - - f : Expr - The target equation - symbol : Symbol - The variable for which the equation is solved - - Returns - ======= - - Set - A set of values for `symbol` for which `f` is equal to - zero. An `EmptySet` is returned if no solution is found. - A `ConditionSet` is returned as unsolved object if algorithms - to evaluate complete solutions are not yet implemented. - - `solveset_real` claims to be complete in the set of the solution it - returns. - - Raises - ====== - - NotImplementedError - Algorithms to solve inequalities in complex domain are - not yet implemented. - ValueError - The input is not valid. - RuntimeError - It is a bug, please report to the github issue tracker. - - - See Also - ======= - - solveset_complex : solver for complex domain - - Examples - ======== - - >>> from sympy import Symbol, exp, sin, sqrt, I - >>> from sympy.solvers.solveset import solveset_real - >>> x = Symbol('x', real=True) - >>> a = Symbol('a', real=True, finite=True, positive=True) - >>> solveset_real(x**2 - 1, x) - {-1, 1} - >>> solveset_real(sqrt(5*x + 6) - 2 - x, x) - {-1, 2} - >>> solveset_real(x - I, x) - EmptySet() - >>> solveset_real(x - a, x) - {a} - >>> solveset_real(exp(x) - a, x) - {log(a)} - - * In case the equation has infinitely many solutions an infinitely indexed - `ImageSet` is returned. 
- - >>> solveset_real(sin(x) - 1, x) - ImageSet(Lambda(_n, 2*_n*pi + pi/2), Integers()) - - * If the equation is true for any arbitrary value of the symbol a `S.Reals` - set is returned. - - >>> solveset_real(x - x, x) - (-oo, oo) - - """ - if not getattr(symbol, 'is_Symbol', False): - raise ValueError('A Symbol must be given, not type %s: %s' % - (type(symbol), symbol)) - - f = sympify(f) - if not isinstance(f, (Expr, Number)): - raise ValueError("%s is not a valid SymPy expression" % (f)) - - original_eq = f - f = together(f) - - # In this, unlike in solveset_complex, expression should only - # be expanded when fraction(f)[1] does not contain the symbol - # for which we are solving - if not symbol in fraction(f)[1].free_symbols and f.is_rational_function(): - f = expand(f) - - f = piecewise_fold(f) - - result = EmptySet() - - if f.expand().is_zero: - return S.Reals - elif not f.has(symbol): - return EmptySet() - elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]): - # if f(x) and g(x) are both finite we can say that the solution of - # f(x)*g(x) == 0 is same as Union(f(x) == 0, g(x) == 0) is not true in - # general. g(x) can grow to infinitely large for the values where - # f(x) == 0. To be sure that we are not silently allowing any - # wrong solutions we are using this technique only if both f and g are - # finite for a finite input. 
- result = Union(*[solveset_real(m, symbol) for m in f.args]) - elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \ - _is_function_class_equation(HyperbolicFunction, f, symbol): - result = _solve_real_trig(f, symbol) - elif f.is_Piecewise: - result = EmptySet() - expr_set_pairs = f.as_expr_set_pairs() - for (expr, in_set) in expr_set_pairs: - solns = solveset_real(expr, symbol).intersect(in_set) - result = result + solns - else: - lhs, rhs_s = invert_real(f, 0, symbol) - if lhs == symbol: - result = rhs_s - elif isinstance(rhs_s, FiniteSet): - equations = [lhs - rhs for rhs in rhs_s] - for equation in equations: - if equation == f: - if any(_has_rational_power(g, symbol)[0] - for g in equation.args): - result += _solve_radical(equation, - symbol, - solveset_real) - elif equation.has(Abs): - result += _solve_abs(f, symbol) - else: - result += _solve_as_rational(equation, symbol, - solveset_solver=solveset_real, - as_poly_solver=_solve_as_poly_real) - else: - result += solveset_real(equation, symbol) - else: - result = ConditionSet(symbol, Eq(f, 0), S.Reals) - - if isinstance(result, FiniteSet): - result = [s for s in result - if isinstance(s, RootOf) - or domain_check(original_eq, symbol, s)] - return FiniteSet(*result).intersect(S.Reals) - else: - return result.intersect(S.Reals) - - -def _solve_as_rational(f, symbol, solveset_solver, as_poly_solver): +def _solve_as_rational(f, symbol, domain): """ solve rational functions""" f = together(f, deep=True) g, h = fraction(f) if not h.has(symbol): - return as_poly_solver(g, symbol) + return _solve_as_poly(g, symbol, domain) else: - valid_solns = solveset_solver(g, symbol) - invalid_solns = solveset_solver(h, symbol) + valid_solns = _solveset(g, symbol, domain) + invalid_solns = _solveset(h, symbol, domain) return valid_solns - invalid_solns @@ -559,14 +389,13 @@ def _solve_real_trig(f, symbol): return ConditionSet(symbol, Eq(f_original, 0), S.Reals) -def _solve_as_poly(f, symbol, solveset_solver, 
invert_func): +def _solve_as_poly(f, symbol, domain=S.Complexes): """ Solve the equation using polynomial techniques if it already is a polynomial equation or, with a change of variables, can be made so. """ result = None if f.is_polynomial(symbol): - solns = roots(f, symbol, cubics=True, quartics=True, quintics=True, domain='EX') num_roots = sum(solns.values()) @@ -578,11 +407,11 @@ def _solve_as_poly(f, symbol, solveset_solver, invert_func): if poly.degree() <= len(solns): result = FiniteSet(*solns) else: - result = ConditionSet(symbol, Eq(f, 0), S.Complexes) + result = ConditionSet(symbol, Eq(f, 0), domain) else: poly = Poly(f) if poly is None: - result = ConditionSet(symbol, Eq(f, 0), S.Complexes) + result = ConditionSet(symbol, Eq(f, 0), domain) gens = [g for g in poly.gens if g.has(symbol)] if len(gens) == 1: @@ -594,17 +423,18 @@ def _solve_as_poly(f, symbol, solveset_solver, invert_func): quintics=True).keys()) if len(poly_solns) < deg: - result = ConditionSet(symbol, Eq(f, 0), S.Complexes) + result = ConditionSet(symbol, Eq(f, 0), domain) if gen != symbol: y = Dummy('y') - lhs, rhs_s = invert_func(gen, y, symbol) - if lhs is symbol: + inverter = invert_real if domain.is_subset(S.Reals) else invert_complex + lhs, rhs_s = inverter(gen, y, symbol) + if lhs == symbol: result = Union(*[rhs_s.subs(y, s) for s in poly_solns]) else: - result = ConditionSet(symbol, Eq(f, 0), S.Complexes) + result = ConditionSet(symbol, Eq(f, 0), domain) else: - result = ConditionSet(symbol, Eq(f, 0), S.Complexes) + result = ConditionSet(symbol, Eq(f, 0), domain) if result is not None: if isinstance(result, FiniteSet): @@ -616,29 +446,11 @@ def _solve_as_poly(f, symbol, solveset_solver, invert_func): for s in result]): s = Dummy('s') result = imageset(Lambda(s, expand_complex(s)), result) + if isinstance(result, FiniteSet): + result = result.intersection(domain) return result else: - return ConditionSet(symbol, Eq(f, 0), S.Complexes) - - -def _solve_as_poly_real(f, symbol): - """ - 
Solve real valued equation with methods to solve polynomial - equations. - """ - return _solve_as_poly(f, symbol, - solveset_solver=solveset_real, - invert_func=invert_real) - - -def _solve_as_poly_complex(f, symbol): - """ - Solve complex valued equation with methods to solve polynomial - equations. - """ - return _solve_as_poly(f, symbol, - solveset_solver=solveset_complex, - invert_func=invert_complex) + return ConditionSet(symbol, Eq(f, 0), domain) def _has_rational_power(expr, symbol): @@ -678,7 +490,6 @@ def _has_rational_power(expr, symbol): def _solve_radical(f, symbol, solveset_solver): """ Helper function to solve equations with radicals """ - from sympy.solvers.solvers import unrad eq, cov = unrad(f) if not cov: result = solveset_solver(eq, symbol) - \ @@ -698,8 +509,12 @@ def _solve_radical(f, symbol, solveset_solver): return FiniteSet(*[s for s in result if checksol(f, symbol, s) is True]) -def _solve_abs(f, symbol): +def _solve_abs(f, symbol, domain): """ Helper function to solve equation involving absolute value function """ + if not domain.is_subset(S.Reals): + raise ValueError(filldedent(''' + Absolute values cannot be inverted in the + complex domain.''')) p, q, r = Wild('p'), Wild('q'), Wild('r') pattern_match = f.match(p*Abs(q) + r) or {} if not pattern_match.get(p, S.Zero).is_zero: @@ -715,117 +530,114 @@ def _solve_abs(f, symbol): symbol).intersect(q_neg_cond) return Union(sols_q_pos, sols_q_neg) else: - return ConditionSet(symbol, Eq(f, 0), S.Complexes) - - -def solveset_complex(f, symbol): - """ Solve a complex valued equation. - - Parameters - ========== - - f : Expr - The target equation - symbol : Symbol - The variable for which the equation is solved - - Returns - ======= - - Set - A set of values for `symbol` for which `f` equal to - zero. An `EmptySet` is returned if no solution is found. - A `ConditionSet` is returned as an unsolved object if algorithms - to evaluate complete solutions are not yet implemented. 
- - `solveset_complex` claims to be complete in the solution set that - it returns. - - Raises - ====== + return ConditionSet(symbol, Eq(f, 0), domain) - NotImplementedError - The algorithms to solve inequalities in complex domain are - not yet implemented. - ValueError - The input is not valid. - RuntimeError - It is a bug, please report to the github issue tracker. - See Also - ======== - solveset_real: solver for real domain +def _solveset(f, symbol, domain, _check=False): + """Helper for solveset to return a result from an expression + that has already been sympify'ed and is known to contain the + given symbol.""" + # _check controls whether the answer is checked or not - Examples - ======== - - >>> from sympy import Symbol, exp - >>> from sympy.solvers.solveset import solveset_complex - >>> from sympy.abc import x, a, b, c - >>> solveset_complex(a*x**2 + b*x +c, x) - {-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a), -b/(2*a) + sqrt(-4*a*c + b**2)/(2*a)} - - * Due to the fact that complex extension of my real valued functions are - multivariate even some simple equations can have infinitely many - solution. 
- - >>> solveset_complex(exp(x) - 1, x) - ImageSet(Lambda(_n, 2*_n*I*pi), Integers()) - - """ - if not getattr(symbol, 'is_Symbol', False): - raise ValueError('A Symbol must be given, not type %s: %s' % - (type(symbol), symbol)) + orig_f = f + f = together(f) + if f.is_Mul: + _, f = f.as_independent(symbol, as_Add=False) + if f.is_Add: + a, h = f.as_independent(symbol) + m, h = h.as_independent(symbol, as_Add=False) + f = a/m + h # XXX condition `m != 0` should be added to soln + f = piecewise_fold(f) - f = sympify(f) - original_eq = f - if not isinstance(f, (Expr, Number)): - raise ValueError(" %s is not a valid sympy expression" % (f)) + # assign the solvers to use + solver = lambda f, x, domain=domain: _solveset(f, x, domain) + if domain.is_subset(S.Reals): + inverter_func = invert_real + else: + inverter_func = invert_complex + inverter = lambda f, rhs, symbol: inverter_func(f, rhs, symbol, domain) - f = together(f) - # Without this equations like a + 4*x**2 - E keep oscillating - # into form a/4 + x**2 - E/4 and (a + 4*x**2 - E)/4 - if not fraction(f)[1].has(symbol): - f = expand(f) + result = EmptySet() - if f.is_zero: - return S.Complexes + if f.expand().is_zero: + return domain elif not f.has(symbol): + return EmptySet() + elif f.is_Mul and all(_is_finite_with_finite_vars(m, domain) + for m in f.args): + # if f(x) and g(x) are both finite we can say that the solution of + # f(x)*g(x) == 0 is same as Union(f(x) == 0, g(x) == 0) is not true in + # general. g(x) can grow to infinitely large for the values where + # f(x) == 0. To be sure that we are not silently allowing any + # wrong solutions we are using this technique only if both f and g are + # finite for a finite input. 
+ result = Union(*[solver(m, symbol) for m in f.args]) + elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \ + _is_function_class_equation(HyperbolicFunction, f, symbol): + result = _solve_real_trig(f, symbol) + elif f.is_Piecewise: + dom = domain result = EmptySet() - elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]): - result = Union(*[solveset_complex(m, symbol) for m in f.args]) + expr_set_pairs = f.as_expr_set_pairs() + for (expr, in_set) in expr_set_pairs: + if in_set.is_Relational: + in_set = in_set.as_set() + if in_set.is_Interval: + dom -= in_set + solns = solver(expr, symbol, in_set) + result += solns else: - lhs, rhs_s = invert_complex(f, 0, symbol) + lhs, rhs_s = inverter(f, 0, symbol) if lhs == symbol: + # do some very minimal simplification since + # repeated inversion may have left the result + # in a state that other solvers (e.g. poly) + # would have simplified; this is done here + # rather than in the inverter since here it + # is only done once whereas there it would + # be repeated for each step of the inversion + if isinstance(rhs_s, FiniteSet): + rhs_s = FiniteSet(*[Mul(* + signsimp(i).as_content_primitive()) + for i in rhs_s]) result = rhs_s elif isinstance(rhs_s, FiniteSet): - equations = [lhs - rhs for rhs in rhs_s] - result = EmptySet() - for equation in equations: + for equation in [lhs - rhs for rhs in rhs_s]: if equation == f: if any(_has_rational_power(g, symbol)[0] for g in equation.args) or _has_rational_power( equation, symbol)[0]: result += _solve_radical(equation, symbol, - solveset_complex) + solver) + elif equation.has(Abs): + result += _solve_abs(f, symbol, domain) else: - result += _solve_as_rational(equation, symbol, - solveset_solver=solveset_complex, - as_poly_solver=_solve_as_poly_complex) + result += _solve_as_rational(equation, symbol, domain) else: - result += solveset_complex(equation, symbol) + result += solver(equation, symbol) else: - result = ConditionSet(symbol, Eq(f, 0), 
S.Complexes) + result = ConditionSet(symbol, Eq(f, 0), domain) - if isinstance(result, FiniteSet): - result = [s for s in result - if isinstance(s, RootOf) - or domain_check(original_eq, symbol, s)] - return FiniteSet(*result) - else: - return result + if _check: + if isinstance(result, ConditionSet): + # it wasn't solved or has enumerated all conditions + # -- leave it alone + return result + + # whittle away all but the symbol-containing core + # to use this for testing + fx = orig_f.as_independent(symbol, as_Add=True)[1] + fx = fx.as_independent(symbol, as_Add=False)[1] + + if isinstance(result, FiniteSet): + # check the result for invalid solutions + result = FiniteSet(*[s for s in result + if isinstance(s, RootOf) + or domain_check(fx, symbol, s)]) + + return result def solveset(f, symbol=None, domain=S.Complexes): @@ -864,12 +676,6 @@ def solveset(f, symbol=None, domain=S.Complexes): It is a bug, please report to the github issue tracker. - `solveset` uses two underlying functions `solveset_real` and - `solveset_complex` to solve equations. They are the solvers for real and - complex domain respectively. `solveset` ignores the assumptions on the - variable being solved for and instead, uses the `domain` parameter to - decide which solver to use. - Notes ===== @@ -889,33 +695,49 @@ def solveset(f, symbol=None, domain=S.Complexes): Examples ======== - >>> from sympy import exp, Symbol, Eq, pprint, S, solveset - >>> from sympy.abc import x + >>> from sympy import exp, sin, Symbol, pprint, S + >>> from sympy.solvers.solveset import solveset, solveset_real + + * The default domain is complex. Not specifying a domain will lead + to the solving of the equation in the complex domain (and this + is not affected by the assumptions on the symbol): - * The default domain is complex. Not specifying a domain will lead to the - solving of the equation in the complex domain. 
+ >>> x = Symbol('x') + >>> pprint(solveset(exp(x) - 1, x), use_unicode=False) + {2*n*I*pi | n in Integers()} + >>> x = Symbol('x', real=True) >>> pprint(solveset(exp(x) - 1, x), use_unicode=False) {2*n*I*pi | n in Integers()} - * If you want to solve equation in real domain by the `solveset` - interface, then specify that the domain is real. Alternatively use - `solveset\_real`. + * If you want to use `solveset` to solve the equation in the + real domain, provide a real domain. (Using `solveset\_real` + does this automatically.) + >>> R = S.Reals >>> x = Symbol('x') - >>> solveset(exp(x) - 1, x, S.Reals) + >>> solveset(exp(x) - 1, x, R) {0} - >>> solveset(Eq(exp(x), 1), x, S.Reals) + >>> solveset_real(exp(x) - 1, x) {0} + The solution is mostly unaffected by assumptions on the symbol, + but there may be some slight difference: + + >>> pprint(solveset(sin(x)/x,x), use_unicode=False) + ({2*n*pi | n in Integers()} \ {0}) U ({2*n*pi + pi | n in Integers()} \ {0}) + + >>> p = Symbol('p', positive=True) + >>> pprint(solveset(sin(p)/p, p), use_unicode=False) + {2*n*pi | n in Integers()} U {2*n*pi + pi | n in Integers()} + * Inequalities can be solved over the real domain only. Use of a complex domain leads to a NotImplementedError. 
- >>> solveset(exp(x) > 1, x, S.Reals) + >>> solveset(exp(x) > 1, x, R) (0, oo) """ - f = sympify(f) if f is S.true: @@ -924,6 +746,9 @@ def solveset(f, symbol=None, domain=S.Complexes): if f is S.false: return S.EmptySet + if not isinstance(f, (Expr, Number)): + raise ValueError("%s is not a valid SymPy expression" % (f)) + free_symbols = f.free_symbols if not free_symbols: @@ -950,8 +775,7 @@ def solveset(f, symbol=None, domain=S.Complexes): if isinstance(f, Eq): from sympy.core import Add f = Add(f.lhs, - f.rhs, evaluate=False) - - if f.is_Relational: + elif f.is_Relational: if not domain.is_subset(S.Reals): raise NotImplementedError(filldedent(''' Inequalities in the complex domain are @@ -959,20 +783,28 @@ def solveset(f, symbol=None, domain=S.Complexes): setting domain=S.Reals''')) try: result = solve_univariate_inequality( - f, symbol, relational=False).intersection(domain) + f, symbol, relational=False) - _invalid_solutions( + f, symbol, domain) except NotImplementedError: result = ConditionSet(symbol, f, domain) return result - if isinstance(f, (Expr, Number)): - if domain is S.Reals: - return solveset_real(f, symbol) - elif domain is S.Complexes: - return solveset_complex(f, symbol) - elif domain.is_subset(S.Reals): - return Intersection(solveset_real(f, symbol), domain) - else: - return Intersection(solveset_complex(f, symbol), domain) + return _solveset(f, symbol, domain, _check=True) + + +def _invalid_solutions(f, symbol, domain): + bad = S.EmptySet + for d in denoms(f): + bad += _solveset(d, symbol, domain, _check=False) + return bad + + +def solveset_real(f, symbol): + return solveset(f, symbol, S.Reals) + + +def solveset_complex(f, symbol): + return solveset(f, symbol, S.Complexes) ###############################################################################
recursion error with solveset ```python >>> solveset(f(x)-pi/2,x) ConditionSet(x, Eq(2*f(x) - pi, 0), Complexes((-oo, oo) x (-oo, oo), False)) >>> solveset(f(x)-pi,x,S.Reals) ConditionSet(x, Eq(f(x) - pi, 0), (-oo, oo)) >>> solveset(f(x)-pi/2,x,S.Reals) # change pi to pi/2 and the error occurs unlike first case above ... File "sympy\solvers\solveset.py", line 513, in solveset_real result += solveset_real(equation, symbol) File "sympy\solvers\solveset.py", line 513, in solveset_real result += solveset_real(equation, symbol) File "sympy\solvers\solveset.py", line 513, in solveset_real result += solveset_real(equation, symbol) File "sympy\solvers\solveset.py", line 513, in solveset_real result += solveset_real(equation, symbol) File "sympy\solvers\solveset.py", line 476, in solveset_real elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]): File "sympy\solvers\solveset.py", line 327, in _is_finite_with_finite_vars return f.xreplace(reps).is_finite File "sympy\core\assumptions.py", line 245, in getit return _ask(fact, self) File "sympy\core\assumptions.py", line 288, in _ask a = evaluate(obj) File "sympy\core\add.py", line 457, in <lambda> (a.is_finite for a in self.args), quick_exit=True) File "sympy\core\logic.py", line 78, in _fuzzy_group for a in args: File "sympy\core\add.py", line 457, in <genexpr> (a.is_finite for a in self.args), quick_exit=True) File "sympy\core\assumptions.py", line 245, in getit return _ask(fact, self) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 288, in _ask a = evaluate(obj) File "sympy\core\mul.py", line 1245, in _eval_is_even is_integer = self.is_integer File "sympy\core\assumptions.py", line 245, in 
getit return _ask(fact, self) File "sympy\core\assumptions.py", line 288, in _ask a = evaluate(obj) File "sympy\core\mul.py", line 1054, in _eval_is_integer is_rational = self.is_rational File "sympy\core\assumptions.py", line 245, in getit return _ask(fact, self) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 300, in _ask _ask(pk, obj) File "sympy\core\assumptions.py", line 288, in _ask a = evaluate(obj) File "sympy\core\mul.py", line 1113, in _eval_is_hermitian return self._eval_herm_antiherm(True) File "sympy\core\mul.py", line 1119, in _eval_herm_antiherm if not t.is_commutative: File "sympy\core\function.py", line 453, in is_commutative if all(getattr(t, 'is_commutative') for t in self.args): File "sympy\core\function.py", line 453, in <genexpr> if all(getattr(t, 'is_commutative') for t in self.args): File "sympy\core\assumptions.py", line 241, in getit return self._assumptions[fact] RuntimeError: maximum recursion depth exceeded ```
sympy/sympy
diff --git a/sympy/polys/tests/test_subresultants_qq_zz.py b/sympy/polys/tests/test_subresultants_qq_zz.py index a377a936c2..737bdf7752 100644 --- a/sympy/polys/tests/test_subresultants_qq_zz.py +++ b/sympy/polys/tests/test_subresultants_qq_zz.py @@ -1,8 +1,6 @@ from sympy import var, sturm, subresultants, prem, pquo -from sympy.matrices import Matrix, eye -from sympy.polys.subresultants_qq_zz import (sylvester, bezout, - subresultants_bezout, modified_subresultants_bezout, - process_bezout_output, backward_eye, +from sympy.matrices import Matrix +from sympy.polys.subresultants_qq_zz import (sylvester, sturm_pg, sturm_q, sturm_amv, euclid_pg, euclid_q, euclid_amv, modified_subresultants_pg, subresultants_pg, subresultants_amv_q, quo_z, rem_z, subresultants_amv, @@ -34,44 +32,6 @@ def test_sylvester(): assert sylvester(x**3 - 7*x + 7, 3*x**2 - 7, x, 2) == Matrix([ [1, 0, -7, 7, 0, 0], [0, 3, 0, -7, 0, 0], [0, 1, 0, -7, 7, 0], [0, 0, 3, 0, -7, 0], [0, 0, 1, 0, -7, 7], [0, 0, 0, 3, 0, -7]]) -def test_bezout(): - x = var('x') - - p = -2*x**5+7*x**3+9*x**2-3*x+1 - q = -10*x**4+21*x**2+18*x-3 - assert bezout(p, q, x, 'bz').det() == sylvester(p, q, x, 2).det() - assert bezout(p, q, x, 'bz').det() != sylvester(p, q, x, 1).det() - assert bezout(p, q, x, 'prs') == backward_eye(5) * bezout(p, q, x, 'bz') * backward_eye(5) - -def test_subresultants_bezout(): - x = var('x') - - p = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 - q = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 - assert subresultants_bezout(p, q, x) == subresultants(p, q, x) - assert subresultants_bezout(p, q, x)[-1] == sylvester(p, q, x).det() - assert subresultants_bezout(p, q, x) != euclid_amv(p, q, x) - amv_factors = [1, 1, -1, 1, -1, 1] - assert subresultants_bezout(p, q, x) == [i*j for i, j in zip(amv_factors, modified_subresultants_amv(p, q, x))] - - p = x**3 - 7*x + 7 - q = 3*x**2 - 7 - assert subresultants_bezout(p, q, x) == euclid_amv(p, q, x) - -def test_modified_subresultants_bezout(): - x = var('x') - - p 
= x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5 - q = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21 - amv_factors = [1, 1, -1, 1, -1, 1] - assert modified_subresultants_bezout(p, q, x) == [i*j for i, j in zip(amv_factors, subresultants_amv(p, q, x))] - assert modified_subresultants_bezout(p, q, x)[-1] != sylvester(p + x**8, q, x).det() - assert modified_subresultants_bezout(p, q, x) != sturm_amv(p, q, x) - - p = x**3 - 7*x + 7 - q = 3*x**2 - 7 - assert modified_subresultants_bezout(p, q, x) == sturm_amv(p, q, x) - assert modified_subresultants_bezout(-p, q, x) != sturm_amv(-p, q, x) def test_sturm_pg(): x = var('x') diff --git a/sympy/solvers/tests/test_solveset.py b/sympy/solvers/tests/test_solveset.py index e8b15d0532..ee49f9cc72 100644 --- a/sympy/solvers/tests/test_solveset.py +++ b/sympy/solvers/tests/test_solveset.py @@ -1,5 +1,5 @@ from sympy import ( - Abs, Dummy, Eq, Gt, + Abs, Dummy, Eq, Gt, Function, LambertW, Piecewise, Poly, Rational, S, Symbol, Matrix, asin, acos, acsc, asec, atan, atanh, cos, csc, erf, erfinv, erfc, erfcinv, exp, log, pi, sin, sinh, sec, sqrt, symbols, @@ -15,7 +15,7 @@ from sympy.polys.rootoftools import CRootOf -from sympy.sets import (FiniteSet, ConditionSet) +from sympy.sets import (FiniteSet, ConditionSet, Complement, ImageSet) from sympy.utilities.pytest import XFAIL, raises, skip, slow from sympy.utilities.randtest import verify_numerically as tn @@ -39,13 +39,19 @@ def test_invert_real(): - x = Dummy(real=True) + x = Symbol('x', real=True) + y = Symbol('y') n = Symbol('n') + def ireal(x, s=S.Reals): + return Intersection(s, x) minus_n = Intersection(Interval(-oo, 0), FiniteSet(-n)) plus_n = Intersection(Interval(0, oo), FiniteSet(n)) assert solveset(abs(x) - n, x, S.Reals) == Union(minus_n, plus_n) + assert invert_real(exp(x), y, x) == (x, ireal(FiniteSet(log(y)))) + + y = Symbol('y', positive=True) n = Symbol('n', real=True) assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3)) assert invert_real(x*3, y, x) == (x, FiniteSet(y / 
3)) @@ -54,7 +60,7 @@ def test_invert_real(): assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3)) assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3)) - assert invert_real(exp(x) + 3, y, x) == (x, FiniteSet(log(y - 3))) + assert invert_real(exp(x) + 3, y, x) == (x, ireal(FiniteSet(log(y - 3)))) assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3))) assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y))) @@ -66,7 +72,7 @@ def test_invert_real(): assert invert_real(Abs(x), y, x) == (x, Union(minus_y, plus_y)) assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2))) - assert invert_real(2**exp(x), y, x) == (x, FiniteSet(log(log(y)/log(2)))) + assert invert_real(2**exp(x), y, x) == (x, ireal(FiniteSet(log(log(y)/log(2))))) assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y))) assert invert_real(x**Rational(1, 2), y, x) == (x, FiniteSet(y**2)) @@ -157,8 +163,9 @@ def test_invert_complex(): assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y))) - raises(ValueError, lambda: invert_real(S.One, y, x)) + raises(ValueError, lambda: invert_real(1, y, x)) raises(ValueError, lambda: invert_complex(x, x, x)) + raises(ValueError, lambda: invert_complex(x, x, 1)) def test_domain_check(): @@ -248,9 +255,7 @@ def test_solve_invert(): assert solveset_real(3**(x + 2), x) == FiniteSet() assert solveset_real(3**(2 - x), x) == FiniteSet() - b = Symbol('b', positive=True) - y = Symbol('y', positive=True) - assert solveset_real(y - b*exp(a/x), x) == FiniteSet(a/log(y/b)) + assert solveset_real(y - b*exp(a/x), x) == Intersection(S.Reals, FiniteSet(a/log(y/b))) # issue 4504 assert solveset_real(2**x - 10, x) == FiniteSet(log(10)/log(2)) @@ -495,6 +500,7 @@ def test_no_sol(): def test_sol_zero_real(): assert solveset_real(0, x) == S.Reals + assert solveset(0, x, Interval(1, 2)) == Interval(1, 2) assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals @@ -566,11 +572,12 @@ def test_solve_abs(): ) == Interval(-1, 2) # 
issue #10069 - assert solveset_real(abs(1/(x - 1)) - 1 > 0, x) == \ - ConditionSet(x, Eq((1 - Abs(x - 1))/Abs(x - 1) > 0, 0), - S.Reals) - assert solveset(abs(1/(x - 1)) - 1 > 0, x, domain=S.Reals - ) == Union(Interval.open(0, 1), Interval.open(1, 2)) + eq = abs(1/(x - 1)) - 1 > 0 + u = Union(Interval.open(0, 1), Interval.open(1, 2)) + assert solveset_real(eq, x) == u + assert solveset(eq, x, domain=S.Reals) == u + + raises(ValueError, lambda: solveset(abs(x) - 1, x)) @XFAIL @@ -620,16 +627,20 @@ def test_atan2(): def test_piecewise(): eq = Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3 - f = Piecewise(((x - 2)**2, x >= 0), (0, True)) assert set(solveset_real(eq, x)) == set(FiniteSet(-1, 5)) + absxm3 = Piecewise( (x - 3, S(0) <= x - 3), - (3 - x, S(0) > x - 3) - ) + (3 - x, S(0) > x - 3)) y = Symbol('y', positive=True) assert solveset_real(absxm3 - y, x) == FiniteSet(-y + 3, y + 3) + + f = Piecewise(((x - 2)**2, x >= 0), (0, True)) assert solveset(f, x, domain=S.Reals) == Union(FiniteSet(2), Interval(-oo, 0, True, True)) + assert solveset(Piecewise((x + 1, x > 0), (I, True)) - I, x) == \ + Interval(-oo, 0) + def test_solveset_complex_polynomial(): from sympy.abc import x, a, b, c @@ -726,6 +737,16 @@ def test_solve_trig(): assert solveset_real(sin(x)**2 + cos(x)**2, x) == S.EmptySet + assert solveset_complex(cos(x) - S.Half, x) == \ + Union(imageset(Lambda(n, 2*n*pi + pi/3), S.Integers), + imageset(Lambda(n, 2*n*pi - pi/3), S.Integers)) + + y, a = symbols('y,a') + assert solveset(sin(y + a) - sin(y), a, domain=S.Reals) == \ + Union(imageset(Lambda(n, 2*n*pi), S.Integers), + imageset(Lambda(n, + -I*(I*(2*n*pi +arg(-exp(-2*I*y))) + 2*im(y))), S.Integers)) + @XFAIL def test_solve_trig_abs(): @@ -739,12 +760,6 @@ def test_solve_invalid_sol(): assert 0 not in solveset_complex((exp(x) - 1)/x, x) -def test_solve_complex_unsolvable(): - unsolved_object = ConditionSet(x, Eq(2*cos(x) - 1, 0), S.Complexes) - solution = solveset_complex(cos(x) - S.Half, x) - assert solution 
== unsolved_object - - @XFAIL def test_solve_trig_simplified(): from sympy.abc import n @@ -873,7 +888,7 @@ def test_solveset(): S.Integers) -def test_conditonset(): +def test_conditionset(): assert solveset(Eq(sin(x)**2 + cos(x)**2, 1), x, domain=S.Reals) == \ ConditionSet(x, True, S.Reals) @@ -889,10 +904,6 @@ def test_conditonset(): assert solveset(x + sin(x) > 1, x, domain=S.Reals) == \ ConditionSet(x, x + sin(x) > 1, S.Reals) - y,a = symbols('y,a') - assert solveset(sin(y + a) - sin(y), a, domain=S.Reals) == \ - ConditionSet(a, Eq(-sin(y) + sin(y + a), 0), S.Reals) - @XFAIL def test_conditionset_equality(): @@ -1070,3 +1081,23 @@ def test_issue_9913(): def test_issue_10397(): assert solveset(sqrt(x), x, S.Complexes) == FiniteSet(0) + + +def test_simplification(): + eq = x + (a - b)/(-2*a + 2*b) + assert solveset(eq, x) == FiniteSet(S.Half) + assert solveset(eq, x, S.Reals) == FiniteSet(S.Half) + + +def test_issue_10555(): + f = Function('f') + assert solveset(f(x) - pi/2, x, S.Reals) == \ + ConditionSet(x, Eq(2*f(x) - pi, 0), S.Reals) + + +def test_issue_8715(): + eq = x + 1/x > -2 + 1/x + assert solveset(eq, x, S.Reals) == \ + (Interval.open(-2, oo) - FiniteSet(0)) + assert solveset(eq.subs(x,log(x)), x, S.Reals) == \ + Interval.open(exp(-2), oo) - FiniteSet(1) diff --git a/sympy/stats/tests/test_continuous_rv.py b/sympy/stats/tests/test_continuous_rv.py index e2ae710a33..ae2cb74e29 100644 --- a/sympy/stats/tests/test_continuous_rv.py +++ b/sympy/stats/tests/test_continuous_rv.py @@ -41,7 +41,7 @@ def test_single_normal(): pdf = density(Y) x = Symbol('x') assert (pdf(x) == - 2**S.Half*exp(-(x - mu)**2/(2*sigma**2))/(2*pi**S.Half*sigma)) + 2**S.Half*exp(-(mu - x)**2/(2*sigma**2))/(2*pi**S.Half*sigma)) assert P(X**2 < 1) == erf(2**S.Half/2)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 5 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@425ee8695a04a17b71ea15fa045fa95efffbeb1d#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/solvers/tests/test_solveset.py::test_invert_real", "sympy/solvers/tests/test_solveset.py::test_invert_complex", "sympy/solvers/tests/test_solveset.py::test_solve_abs", "sympy/solvers/tests/test_solveset.py::test_solve_only_exp_1", "sympy/solvers/tests/test_solveset.py::test_piecewise", "sympy/solvers/tests/test_solveset.py::test_solve_trig", "sympy/solvers/tests/test_solveset.py::test_issue_10555", "sympy/solvers/tests/test_solveset.py::test_issue_8715", "sympy/stats/tests/test_continuous_rv.py::test_single_normal" ]
[]
[ "sympy/polys/tests/test_subresultants_qq_zz.py::test_sylvester", "sympy/polys/tests/test_subresultants_qq_zz.py::test_sturm_pg", "sympy/polys/tests/test_subresultants_qq_zz.py::test_sturm_q", "sympy/polys/tests/test_subresultants_qq_zz.py::test_sturm_amv", "sympy/polys/tests/test_subresultants_qq_zz.py::test_euclid_pg", "sympy/polys/tests/test_subresultants_qq_zz.py::test_euclid_q", "sympy/polys/tests/test_subresultants_qq_zz.py::test_euclid_amv", "sympy/polys/tests/test_subresultants_qq_zz.py::test_modified_subresultants_pg", "sympy/polys/tests/test_subresultants_qq_zz.py::test_subresultants_pg", "sympy/polys/tests/test_subresultants_qq_zz.py::test_subresultants_amv_q", "sympy/polys/tests/test_subresultants_qq_zz.py::test_rem_z", "sympy/polys/tests/test_subresultants_qq_zz.py::test_quo_z", "sympy/polys/tests/test_subresultants_qq_zz.py::test_subresultants_amv", "sympy/polys/tests/test_subresultants_qq_zz.py::test_modified_subresultants_amv", "sympy/polys/tests/test_subresultants_qq_zz.py::test_subresultants_rem", "sympy/polys/tests/test_subresultants_qq_zz.py::test_subresultants_vv", "sympy/polys/tests/test_subresultants_qq_zz.py::test_subresultants_vv_2", "sympy/solvers/tests/test_solveset.py::test_domain_check", "sympy/solvers/tests/test_solveset.py::test_is_function_class_equation", "sympy/solvers/tests/test_solveset.py::test_garbage_input", "sympy/solvers/tests/test_solveset.py::test_solve_mul", "sympy/solvers/tests/test_solveset.py::test_solve_invert", "sympy/solvers/tests/test_solveset.py::test_errorinverses", "sympy/solvers/tests/test_solveset.py::test_solve_polynomial", "sympy/solvers/tests/test_solveset.py::test_return_root_of", "sympy/solvers/tests/test_solveset.py::test__has_rational_power", "sympy/solvers/tests/test_solveset.py::test_solveset_sqrt_1", "sympy/solvers/tests/test_solveset.py::test_solveset_sqrt_2", "sympy/solvers/tests/test_solveset.py::test_solve_sqrt_3", "sympy/solvers/tests/test_solveset.py::test_solve_polynomial_symbolic_param", 
"sympy/solvers/tests/test_solveset.py::test_solve_rational", "sympy/solvers/tests/test_solveset.py::test_solveset_real_gen_is_pow", "sympy/solvers/tests/test_solveset.py::test_no_sol", "sympy/solvers/tests/test_solveset.py::test_sol_zero_real", "sympy/solvers/tests/test_solveset.py::test_no_sol_rational_extragenous", "sympy/solvers/tests/test_solveset.py::test_solve_polynomial_cv_1a", "sympy/solvers/tests/test_solveset.py::test_solveset_real_rational", "sympy/solvers/tests/test_solveset.py::test_solveset_real_log", "sympy/solvers/tests/test_solveset.py::test_poly_gens", "sympy/solvers/tests/test_solveset.py::test_real_imag_splitting", "sympy/solvers/tests/test_solveset.py::test_units", "sympy/solvers/tests/test_solveset.py::test_atan2", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_polynomial", "sympy/solvers/tests/test_solveset.py::test_sol_zero_complex", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_rational", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_exp", "sympy/solvers/tests/test_solveset.py::test_solve_complex_log", "sympy/solvers/tests/test_solveset.py::test_solve_complex_sqrt", "sympy/solvers/tests/test_solveset.py::test_solveset_complex_tan", "sympy/solvers/tests/test_solveset.py::test_solve_invalid_sol", "sympy/solvers/tests/test_solveset.py::test_solveset", "sympy/solvers/tests/test_solveset.py::test_conditionset", "sympy/solvers/tests/test_solveset.py::test_solveset_domain", "sympy/solvers/tests/test_solveset.py::test_improve_coverage", "sympy/solvers/tests/test_solveset.py::test_issue_9522", "sympy/solvers/tests/test_solveset.py::test_linear_eq_to_matrix", "sympy/solvers/tests/test_solveset.py::test_linsolve", "sympy/solvers/tests/test_solveset.py::test_issue_9556", "sympy/solvers/tests/test_solveset.py::test_issue_9611", "sympy/solvers/tests/test_solveset.py::test_issue_9557", "sympy/solvers/tests/test_solveset.py::test_issue_9778", "sympy/solvers/tests/test_solveset.py::test_issue_9849", 
"sympy/solvers/tests/test_solveset.py::test_issue_9953", "sympy/solvers/tests/test_solveset.py::test_issue_9913", "sympy/solvers/tests/test_solveset.py::test_issue_10397", "sympy/solvers/tests/test_solveset.py::test_simplification", "sympy/stats/tests/test_continuous_rv.py::test_ContinuousDomain", "sympy/stats/tests/test_continuous_rv.py::test_multiple_normal", "sympy/stats/tests/test_continuous_rv.py::test_symbolic", "sympy/stats/tests/test_continuous_rv.py::test_cdf", "sympy/stats/tests/test_continuous_rv.py::test_sample", "sympy/stats/tests/test_continuous_rv.py::test_ContinuousRV", "sympy/stats/tests/test_continuous_rv.py::test_arcsin", "sympy/stats/tests/test_continuous_rv.py::test_benini", "sympy/stats/tests/test_continuous_rv.py::test_beta", "sympy/stats/tests/test_continuous_rv.py::test_betaprime", "sympy/stats/tests/test_continuous_rv.py::test_cauchy", "sympy/stats/tests/test_continuous_rv.py::test_chi", "sympy/stats/tests/test_continuous_rv.py::test_chi_noncentral", "sympy/stats/tests/test_continuous_rv.py::test_chi_squared", "sympy/stats/tests/test_continuous_rv.py::test_dagum", "sympy/stats/tests/test_continuous_rv.py::test_erlang", "sympy/stats/tests/test_continuous_rv.py::test_exponential", "sympy/stats/tests/test_continuous_rv.py::test_f_distribution", "sympy/stats/tests/test_continuous_rv.py::test_fisher_z", "sympy/stats/tests/test_continuous_rv.py::test_frechet", "sympy/stats/tests/test_continuous_rv.py::test_gamma", "sympy/stats/tests/test_continuous_rv.py::test_gamma_inverse", "sympy/stats/tests/test_continuous_rv.py::test_gompertz", "sympy/stats/tests/test_continuous_rv.py::test_kumaraswamy", "sympy/stats/tests/test_continuous_rv.py::test_laplace", "sympy/stats/tests/test_continuous_rv.py::test_logistic", "sympy/stats/tests/test_continuous_rv.py::test_lognormal", "sympy/stats/tests/test_continuous_rv.py::test_maxwell", "sympy/stats/tests/test_continuous_rv.py::test_nakagami", "sympy/stats/tests/test_continuous_rv.py::test_pareto", 
"sympy/stats/tests/test_continuous_rv.py::test_pareto_numeric", "sympy/stats/tests/test_continuous_rv.py::test_raised_cosine", "sympy/stats/tests/test_continuous_rv.py::test_rayleigh", "sympy/stats/tests/test_continuous_rv.py::test_shiftedgompertz", "sympy/stats/tests/test_continuous_rv.py::test_studentt", "sympy/stats/tests/test_continuous_rv.py::test_quadratic_u", "sympy/stats/tests/test_continuous_rv.py::test_uniform", "sympy/stats/tests/test_continuous_rv.py::test_uniform_P", "sympy/stats/tests/test_continuous_rv.py::test_von_mises", "sympy/stats/tests/test_continuous_rv.py::test_weibull", "sympy/stats/tests/test_continuous_rv.py::test_weibull_numeric", "sympy/stats/tests/test_continuous_rv.py::test_wignersemicircle", "sympy/stats/tests/test_continuous_rv.py::test_prefab_sampling", "sympy/stats/tests/test_continuous_rv.py::test_input_value_assertions", "sympy/stats/tests/test_continuous_rv.py::test_probability_unevaluated", "sympy/stats/tests/test_continuous_rv.py::test_density_unevaluated", "sympy/stats/tests/test_continuous_rv.py::test_NormalDistribution", "sympy/stats/tests/test_continuous_rv.py::test_random_parameters", "sympy/stats/tests/test_continuous_rv.py::test_random_parameters_given", "sympy/stats/tests/test_continuous_rv.py::test_conjugate_priors", "sympy/stats/tests/test_continuous_rv.py::test_difficult_univariate", "sympy/stats/tests/test_continuous_rv.py::test_issue_10003" ]
[]
BSD
394
PyCQA__mccabe-38
c9bb16e5c66997153e0347ab5a1ee39e7d2a2e76
2016-01-24 03:31:04
c9bb16e5c66997153e0347ab5a1ee39e7d2a2e76
diff --git a/mccabe.py b/mccabe.py index f5ef5d9..72b8bdd 100644 --- a/mccabe.py +++ b/mccabe.py @@ -133,6 +133,8 @@ class PathGraphingAstVisitor(ASTVisitor): self.graphs["%s%s" % (self.classname, node.name)] = self.graph self.reset() + visitAsyncFunctionDef = visitFunctionDef + def visitClassDef(self, node): old_classname = self.classname self.classname += node.name + "." @@ -158,13 +160,13 @@ class PathGraphingAstVisitor(ASTVisitor): visitAssert = visitAssign = visitAugAssign = visitDelete = visitPrint = \ visitRaise = visitYield = visitImport = visitCall = visitSubscript = \ visitPass = visitContinue = visitBreak = visitGlobal = visitReturn = \ - visitSimpleStatement + visitAwait = visitSimpleStatement def visitLoop(self, node): name = "Loop %d" % node.lineno self._subgraph(node, name) - visitFor = visitWhile = visitLoop + visitAsyncFor = visitFor = visitWhile = visitLoop def visitIf(self, node): name = "If %d" % node.lineno @@ -216,6 +218,8 @@ class PathGraphingAstVisitor(ASTVisitor): self.appendPathNode(name) self.dispatch_list(node.body) + visitAsyncWith = visitWith + class McCabeChecker(object): """McCabe cyclomatic complexity checker.""" diff --git a/setup.cfg b/setup.cfg index 5e40900..519ba68 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,5 @@ [wheel] universal = 1 + +[aliases] +test = pytest diff --git a/setup.py b/setup.py index fb8b970..bf8d7d7 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,8 @@ setup( license='Expat license', py_modules=['mccabe'], zip_safe=False, - test_suite='test_mccabe', + setup_requires=['pytest-runner'], + tests_require=['pytest'], entry_points={ 'flake8.extension': [ 'C90 = mccabe:McCabeChecker', diff --git a/tox.ini b/tox.ini index a472aa4..6e7e690 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] envlist = - py26,py27,py33,py34,flake8 + py26,py27,py33,py34,py35,flake8 [testenv] deps =
Not working with python 3.5 "async def ..." It looks like mccabe is ignoring python 3.5 coroutines defined like ```python async def foobar(a, b, c): whatever(a, b, c) ``` I tried it via flake8 version: 2.5.1 (pep8: 1.7.0, pyflakes: 1.0.0, mccabe: 0.3.1) CPython 3.5.0+ on Linux
PyCQA/mccabe
diff --git a/test_mccabe.py b/test_mccabe.py index 07d8d78..44fb565 100644 --- a/test_mccabe.py +++ b/test_mccabe.py @@ -5,6 +5,8 @@ try: except ImportError: from io import StringIO +import pytest + import mccabe from mccabe import get_code_complexity @@ -84,6 +86,19 @@ else: print(4) """ +async_keywords = """\ +async def foobar(a, b, c): + await whatever(a, b, c) + if await b: + pass + + async with c: + pass + + async for x in a: + pass +""" + def get_complexity_number(snippet, strio, max=0): """Get the complexity number from the printed string.""" @@ -164,6 +179,13 @@ class McCabeTestCase(unittest.TestCase): def test_try_else(self): self.assert_complexity(try_else, 4) + @pytest.mark.skipif(sys.version_info < (3, 5), + reason="Async keywords are only valid on Python 3.5+") + def test_async_keywords(self): + """Validate that we properly process async keyword usage.""" + complexity = get_complexity_number(async_keywords, self.strio) + self.assertEqual(complexity, 3) + class RegressionTests(unittest.TestCase): def setUp(self):
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 0 }, "num_modified_files": 4 }
0.3
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "pytest", "flake8" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work flake8==7.2.0 iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work -e git+https://github.com/PyCQA/mccabe.git@c9bb16e5c66997153e0347ab5a1ee39e7d2a2e76#egg=mccabe packaging @ file:///croot/packaging_1734472117206/work pluggy @ file:///croot/pluggy_1733169602837/work pycodestyle==2.13.0 pyflakes==3.3.1 pytest @ file:///croot/pytest_1738938843180/work tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
name: mccabe channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - exceptiongroup=1.2.0=py39h06a4308_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - packaging=24.2=py39h06a4308_0 - pip=25.0=py39h06a4308_0 - pluggy=1.5.0=py39h06a4308_0 - pytest=8.3.4=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py39h06a4308_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - flake8==7.2.0 - pycodestyle==2.13.0 - pyflakes==3.3.1 prefix: /opt/conda/envs/mccabe
[ "test_mccabe.py::McCabeTestCase::test_async_keywords" ]
[]
[ "test_mccabe.py::McCabeTestCase::test_for_else_snippet", "test_mccabe.py::McCabeTestCase::test_for_loop_snippet", "test_mccabe.py::McCabeTestCase::test_if_elif_else_dead_path_snippet", "test_mccabe.py::McCabeTestCase::test_nested_functions_snippet", "test_mccabe.py::McCabeTestCase::test_print_message", "test_mccabe.py::McCabeTestCase::test_recursive_snippet", "test_mccabe.py::McCabeTestCase::test_sequential_snippet", "test_mccabe.py::McCabeTestCase::test_sequential_unencapsulated_snippet", "test_mccabe.py::McCabeTestCase::test_trivial", "test_mccabe.py::McCabeTestCase::test_try_else", "test_mccabe.py::RegressionTests::test_max_complexity_is_always_an_int" ]
[]
Expat License
395
guykisel__inline-plz-20
f0fb68fa031c40920731a6f31526f6b455768f37
2016-01-25 06:19:06
f0fb68fa031c40920731a6f31526f6b455768f37
diff --git a/.travis.yml b/.travis.yml index 74e2d43..9f6e2d7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,8 +21,7 @@ install: # command to run tests, e.g. python setup.py test script: - tox - - prospector --zero-exit > lint.txt - - inline-plz --parser=prospector --filename=lint.txt + - inline-plz # After you create the Github repo and add it to Travis, run the # travis_pypi_setup.py script to finish PyPI deployment setup diff --git a/inlineplz/env/local.py b/inlineplz/env/local.py index 2a60f74..31f9c9c 100644 --- a/inlineplz/env/local.py +++ b/inlineplz/env/local.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import -import subprocess +import os from inlineplz.env.base import EnvBase import inlineplz.util.git as git @@ -9,5 +9,5 @@ import inlineplz.util.git as git class Local(EnvBase): def __init__(self): - if subprocess.check_call(['git status']): + if os.path.exists('.git'): self.commit = git.current_sha() diff --git a/inlineplz/linters/__init__.py b/inlineplz/linters/__init__.py new file mode 100644 index 0000000..e123811 --- /dev/null +++ b/inlineplz/linters/__init__.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- + +"""Linter configurations.""" + +from __future__ import absolute_import + +import os +import subprocess + +from inlineplz import parsers + +LINTERS = { + 'prospector': { + 'run': ['prospector', '--zero-exit'], + 'dotfile': '.prospector.yaml', + 'parser': parsers.ProspectorParser + } +} + + +def lint(): + messages = [] + for config in LINTERS.values(): + if config.get('dotfile') in os.listdir(os.getcwd()): + output = subprocess.check_output(config.get('run')).decode('utf-8') + messages.extend(config.get('parser')().parse(output)) + return messages diff --git a/inlineplz/main.py b/inlineplz/main.py index 073f14d..d9819c3 100644 --- a/inlineplz/main.py +++ b/inlineplz/main.py @@ -8,6 +8,7 @@ import argparse from inlineplz import interfaces from inlineplz import parsers from inlineplz import env +from inlineplz import 
linters def main(): @@ -17,8 +18,6 @@ def main(): parser.add_argument('--repo', type=str) parser.add_argument('--repo-slug', type=str) parser.add_argument('--token', type=str) - parser.add_argument('--filename', type=str, required=True) - parser.add_argument('--parser', type=str, required=True, choices=parsers.PARSERS) parser.add_argument('--interface', type=str, choices=interfaces.INTERFACES) parser.add_argument('--url', type=str) parser.add_argument('--dryrun', action='store_true') @@ -34,8 +33,6 @@ def inline(args): Parse input file with the specified parser and post messages based on lint output :param args: Contains the following - filename: Linter output - parser: Use a different parser based on the lint tool interface: How are we going to post comments? owner: Username of repo owner repo: Repository name @@ -53,9 +50,8 @@ def inline(args): owner = args.owner repo = args.repo - with open(args.filename) as inputfile: - my_parser = parsers.PARSERS[args.parser]() - messages = my_parser.parse(inputfile.read()) + messages = linters.lint() + # TODO: implement dryrun as an interface instead of a special case here if args.dryrun: for msg in messages: diff --git a/inlineplz/message.py b/inlineplz/message.py index 2191043..61011a4 100644 --- a/inlineplz/message.py +++ b/inlineplz/message.py @@ -18,4 +18,8 @@ Message: @property def content(self): - return '```\n' + '\n'.join(self.comments) + '\n```' + if not self.comments: + return '' + if len(self.comments) > 1: + return '```\n' + '\n'.join(self.comments) + '\n```' + return '`{0}`'.format(self.comments[0].strip())
detect when running in ci and run/configure automatically if CI envvars (jenkins/travis/etc) are present, then automatically look up the current console log, find linter command line invocations, and parse them.
guykisel/inline-plz
diff --git a/tests/parsers/test_prospector.py b/tests/parsers/test_prospector.py index 2913435..f673b81 100644 --- a/tests/parsers/test_prospector.py +++ b/tests/parsers/test_prospector.py @@ -17,10 +17,14 @@ prospector_path = os.path.join( def test_prospector(): with open(prospector_path) as inputfile: messages = prospector.ProspectorParser().parse(inputfile.read()) - assert messages[0].content == '```\npylint: syntax-error / invalid syntax\n```' + assert messages[0].content == '`pylint: syntax-error / invalid syntax`' assert messages[0].line_number == 34 assert messages[0].path == 'docs/conf.py' - assert messages[1].content == '```\npylint: unused-import / Unused Message imported from message\n```' + assert messages[1].content == '`pylint: unused-import / Unused Message imported from message`' assert messages[1].line_number == 4 assert messages[1].path == 'inline-plz/parsers/base.py' + assert messages[9].content == ('```\npylint: misplaced-comparison-constant / Comparison ' + 'should be __name__ == \'__main__\' (col 3)\npylint: ' + 'pretend this is a real message\n```') + assert messages[9].line_number == 113 assert len(messages) == 11 diff --git a/tests/testdata/parsers/prospector.txt b/tests/testdata/parsers/prospector.txt index 7402079..7c9ed99 100644 --- a/tests/testdata/parsers/prospector.txt +++ b/tests/testdata/parsers/prospector.txt @@ -28,6 +28,7 @@ travis_pypi_setup.py pylint: bare-except / No exception type(s) specified Line: 113 pylint: misplaced-comparison-constant / Comparison should be __name__ == '__main__' (col 3) + pylint: pretend this is a real message Line: 114 pylint: wrong-import-position / Import "import argparse" should be placed at the top of the module (col 4)
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 2, "test_score": 3 }, "num_modified_files": 4 }
unknown
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "numpy>=1.16.0", "pandas>=1.0.0", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi==2025.1.31 cffi==1.17.1 charset-normalizer==3.4.1 cryptography==44.0.2 exceptiongroup==1.2.2 github3.py==4.0.1 idna==3.10 iniconfig==2.1.0 -e git+https://github.com/guykisel/inline-plz.git@f0fb68fa031c40920731a6f31526f6b455768f37#egg=inlineplz numpy==2.0.2 packaging==24.2 pandas==2.2.3 pluggy==1.5.0 pycparser==2.22 PyJWT==2.10.1 pytest==8.3.5 python-dateutil==2.9.0.post0 pytz==2025.2 requests==2.32.3 six==1.17.0 tomli==2.2.1 tzdata==2025.2 unidiff==0.7.5 uritemplate==4.1.1 urllib3==2.3.0
name: inline-plz channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - certifi==2025.1.31 - cffi==1.17.1 - charset-normalizer==3.4.1 - cryptography==44.0.2 - exceptiongroup==1.2.2 - github3-py==4.0.1 - idna==3.10 - iniconfig==2.1.0 - numpy==2.0.2 - packaging==24.2 - pandas==2.2.3 - pluggy==1.5.0 - pycparser==2.22 - pyjwt==2.10.1 - pytest==8.3.5 - python-dateutil==2.9.0.post0 - pytz==2025.2 - requests==2.32.3 - six==1.17.0 - tomli==2.2.1 - tzdata==2025.2 - unidiff==0.7.5 - uritemplate==4.1.1 - urllib3==2.3.0 prefix: /opt/conda/envs/inline-plz
[ "tests/parsers/test_prospector.py::test_prospector" ]
[]
[]
[]
ISC License
396
sympy__sympy-10470
9ec2b851ac30e2f1ac13bc68d0fa30d63002d5d5
2016-01-25 18:56:01
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py index 46d81b7cd8..4b8e799d92 100644 --- a/sympy/printing/latex.py +++ b/sympy/printing/latex.py @@ -1574,9 +1574,9 @@ def _print_ConditionSet(self, s): self._print(s.condition.as_expr())) def _print_ComplexRegion(self, s): - vars_print = ', '.join([self._print(var) for var in s.args[0].variables]) + vars_print = ', '.join([self._print(var) for var in s.variables]) return r"\left\{%s\; |\; %s \in %s \right\}" % ( - self._print(s.args[0].expr), + self._print(s.expr), vars_print, self._print(s.sets)) diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py index dc910fc4ce..644c00953f 100644 --- a/sympy/printing/pretty/pretty.py +++ b/sympy/printing/pretty/pretty.py @@ -1567,8 +1567,8 @@ def _print_ComplexRegion(self, ts): inn = u("\N{SMALL ELEMENT OF}") else: inn = 'in' - variables = self._print_seq(ts.args[0].variables) - expr = self._print(ts.args[0].expr) + variables = self._print_seq(ts.variables) + expr = self._print(ts.expr) bar = self._print("|") prodsets = self._print(ts.sets) diff --git a/sympy/sets/fancysets.py b/sympy/sets/fancysets.py index 1a45f9ae70..3cb2827588 100644 --- a/sympy/sets/fancysets.py +++ b/sympy/sets/fancysets.py @@ -568,7 +568,7 @@ class ComplexRegion(Set): >>> c = Interval(1, 8) >>> c1 = ComplexRegion(a*b) # Rectangular Form >>> c1 - ComplexRegion(Lambda((_x, _y), _x + _y*I), [2, 3] x [4, 6]) + ComplexRegion([2, 3] x [4, 6], False) * c1 represents the rectangular region in complex plane surrounded by the coordinates (2, 4), (3, 4), (3, 6) and @@ -576,8 +576,7 @@ class ComplexRegion(Set): >>> c2 = ComplexRegion(Union(a*b, b*c)) >>> c2 - ComplexRegion(Lambda((_x, _y), _x + _y*I), - [2, 3] x [4, 6] U [4, 6] x [1, 8]) + ComplexRegion([2, 3] x [4, 6] U [4, 6] x [1, 8], False) * c2 represents the Union of two rectangular regions in complex plane. 
One of them surrounded by the coordinates of c1 and @@ -593,8 +592,7 @@ class ComplexRegion(Set): >>> theta = Interval(0, 2*S.Pi) >>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form >>> c2 # unit Disk - ComplexRegion(Lambda((_r, _theta), _r*(I*sin(_theta) + cos(_theta))), - [0, 1] x [0, 2*pi)) + ComplexRegion([0, 1] x [0, 2*pi), True) * c2 represents the region in complex plane inside the Unit Disk centered at the origin. @@ -608,7 +606,7 @@ class ComplexRegion(Set): >>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True) >>> intersection = unit_disk.intersect(upper_half_unit_disk) >>> intersection - ComplexRegion(Lambda((_r, _theta), _r*(I*sin(_theta) + cos(_theta))), [0, 1] x [0, pi]) + ComplexRegion([0, 1] x [0, pi], True) >>> intersection == upper_half_unit_disk True @@ -621,14 +619,14 @@ class ComplexRegion(Set): is_ComplexRegion = True def __new__(cls, sets, polar=False): - from sympy import symbols, Dummy + from sympy import symbols, Dummy, sympify, sin, cos x, y, r, theta = symbols('x, y, r, theta', cls=Dummy) I = S.ImaginaryUnit + polar = sympify(polar) # Rectangular Form - if polar is False: - + if polar == False: if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2): # ** ProductSet of FiniteSets in the Complex Plane. 
** @@ -639,34 +637,38 @@ def __new__(cls, sets, polar=False): for y in sets.args[1]: complex_num.append(x + I*y) obj = FiniteSet(*complex_num) - else: obj = ImageSet.__new__(cls, Lambda((x, y), x + I*y), sets) + obj._variables = (x, y) + obj._expr = x + I*y # Polar Form - elif polar is True: + elif polar == True: new_sets = [] - # sets is Union of ProductSets if not sets.is_ProductSet: for k in sets.args: new_sets.append(k) - # sets is ProductSets else: new_sets.append(sets) - # Normalize input theta for k, v in enumerate(new_sets): from sympy.sets import ProductSet new_sets[k] = ProductSet(v.args[0], normalize_theta_set(v.args[1])) sets = Union(*new_sets) - - from sympy import cos, sin obj = ImageSet.__new__(cls, Lambda((r, theta), r*(cos(theta) + I*sin(theta))), sets) + obj._variables = (r, theta) + obj._expr = r*(cos(theta) + I*sin(theta)) + + else: + raise ValueError("polar should be either True or False") + + obj._sets = sets + obj._polar = polar return obj @property @@ -689,7 +691,19 @@ def sets(self): [2, 3] x [4, 5] U [4, 5] x [1, 7] """ - return self.args[1] + return self._sets + + @property + def args(self): + return (self._sets, self._polar) + + @property + def variables(self): + return self._variables + + @property + def expr(self): + return self._expr @property def psets(self): @@ -711,11 +725,11 @@ def psets(self): ([2, 3] x [4, 5], [4, 5] x [1, 7]) """ - if self.args[1].is_ProductSet: + if self.sets.is_ProductSet: psets = () - psets = psets + (self.args[1], ) + psets = psets + (self.sets, ) else: - psets = self.args[1].args + psets = self.sets.args return psets @property @@ -795,7 +809,7 @@ def polar(self): >>> C2.polar True """ - return self.args[0].args[1].is_Mul + return self._polar @property def _measure(self):
simplification of Unions of ComplexRegions ``` >>> c1 = ComplexRegion(Interval(1, 2)*Interval(2, 3)) >>> c2 = ComplexRegion(Interval(1, 5)*Interval(1, 3)) >>> simplify(Union(c1, c2)) --------------------------------------------------------------------------- UnboundLocalError Traceback (most recent call last) <ipython-input-20-c14828895520> in <module>() ----> 1 simplify(Union(c1, c2)) /home/gxyd/Public/sympy/sympy/simplify/simplify.py in simplify(expr, ratio, measure, fu) 516 if not isinstance(expr, (Add, Mul, Pow, ExpBase)): 517 return expr.func(*[simplify(x, ratio=ratio, measure=measure, fu=fu) --> 518 for x in expr.args]) 519 520 # TODO: Apply different strategies, considering expression pattern: /home/gxyd/Public/sympy/sympy/sets/fancysets.py in __new__(cls, sets, polar) 673 r*(cos(theta) + I*sin(theta))), 674 sets) --> 675 return obj 676 677 @property UnboundLocalError: local variable 'obj' referenced before assignment ```
sympy/sympy
diff --git a/sympy/sets/tests/test_fancysets.py b/sympy/sets/tests/test_fancysets.py index 002f5d7537..d8d5028f98 100644 --- a/sympy/sets/tests/test_fancysets.py +++ b/sympy/sets/tests/test_fancysets.py @@ -3,6 +3,7 @@ ComplexRegion) from sympy.sets.sets import (FiniteSet, Interval, imageset, EmptySet, Union, Intersection) +from sympy.simplify.simplify import simplify from sympy import (S, Symbol, Lambda, symbols, cos, sin, pi, oo, Basic, Rational, sqrt, tan, log, Abs, I) from sympy.utilities.pytest import XFAIL, raises @@ -460,3 +461,13 @@ def test_ComplexRegion_FiniteSet(): def test_union_RealSubSet(): assert (S.Complexes).union(Interval(1, 2)) == S.Complexes assert (S.Complexes).union(S.Integers) == S.Complexes + + +def test_issue_9980(): + c1 = ComplexRegion(Interval(1, 2)*Interval(2, 3)) + c2 = ComplexRegion(Interval(1, 5)*Interval(1, 3)) + R = Union(c1, c2) + assert simplify(R) == ComplexRegion(Union(Interval(1, 2)*Interval(2, 3), \ + Interval(1, 5)*Interval(1, 3)), False) + assert c1.func(*c1.args) == c1 + assert R.func(*R.args) == R
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 3 }, "num_modified_files": 3 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "pytest", "pip_packages": [ "mpmath>=0.19", "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": null, "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs @ file:///croot/attrs_1668696182826/work certifi @ file:///croot/certifi_1671487769961/work/certifi flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1648562407465/work iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work mpmath==1.3.0 packaging @ file:///croot/packaging_1671697413597/work pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work py @ file:///opt/conda/conda-bld/py_1644396412707/work pytest==7.1.2 -e git+https://github.com/sympy/sympy.git@9ec2b851ac30e2f1ac13bc68d0fa30d63002d5d5#egg=sympy tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work typing_extensions @ file:///croot/typing_extensions_1669924550328/work zipp @ file:///croot/zipp_1672387121353/work
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - attrs=22.1.0=py37h06a4308_0 - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - flit-core=3.6.0=pyhd3eb1b0_0 - importlib-metadata=4.11.3=py37h06a4308_0 - importlib_metadata=4.11.3=hd3eb1b0_0 - iniconfig=1.1.1=pyhd3eb1b0_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - packaging=22.0=py37h06a4308_0 - pip=22.3.1=py37h06a4308_0 - pluggy=1.0.0=py37h06a4308_1 - py=1.11.0=pyhd3eb1b0_0 - pytest=7.1.2=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tomli=2.0.1=py37h06a4308_0 - typing_extensions=4.4.0=py37h06a4308_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zipp=3.11.0=py37h06a4308_0 - zlib=1.2.13=h5eee18b_1 - pip: - mpmath==1.3.0 prefix: /opt/conda/envs/sympy
[ "sympy/sets/tests/test_fancysets.py::test_issue_9980" ]
[]
[ "sympy/sets/tests/test_fancysets.py::test_naturals", "sympy/sets/tests/test_fancysets.py::test_naturals0", "sympy/sets/tests/test_fancysets.py::test_integers", "sympy/sets/tests/test_fancysets.py::test_ImageSet", "sympy/sets/tests/test_fancysets.py::test_image_is_ImageSet", "sympy/sets/tests/test_fancysets.py::test_ImageSet_iterator_not_injetive", "sympy/sets/tests/test_fancysets.py::test_Range", "sympy/sets/tests/test_fancysets.py::test_range_interval_intersection", "sympy/sets/tests/test_fancysets.py::test_fun", "sympy/sets/tests/test_fancysets.py::test_Reals", "sympy/sets/tests/test_fancysets.py::test_Complex", "sympy/sets/tests/test_fancysets.py::test_intersections", "sympy/sets/tests/test_fancysets.py::test_infinitely_indexed_set_1", "sympy/sets/tests/test_fancysets.py::test_infinitely_indexed_set_2", "sympy/sets/tests/test_fancysets.py::test_imageset_intersect_real", "sympy/sets/tests/test_fancysets.py::test_ImageSet_simplification", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_contains", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_intersect", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_union", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_measure", "sympy/sets/tests/test_fancysets.py::test_normalize_theta_set", "sympy/sets/tests/test_fancysets.py::test_ComplexRegion_FiniteSet", "sympy/sets/tests/test_fancysets.py::test_union_RealSubSet" ]
[]
BSD
397
sympy__sympy-10478
bdb181bbc640df4f1e27ebe42a385da5780f0fa2
2016-01-26 12:25:09
0241b35cae5f2adc04dc37b25f7dc9c5f00bd746
jksuom: I looks like you have based the PR on a fairly old version of SymPy. It may be best to pull an up-to-date version first and then start afresh. As to the values of factorial2 for negative integers, not all of them need be infinite. It is possible to define finite values for negative odd arguments by a recurrence relation or rewrite formula.
diff --git a/sympy/functions/combinatorial/factorials.py b/sympy/functions/combinatorial/factorials.py index ea9e5a5ec0..48ea175555 100644 --- a/sympy/functions/combinatorial/factorials.py +++ b/sympy/functions/combinatorial/factorials.py @@ -330,8 +330,8 @@ def eval(cls, arg): # TODO: extend this to complex numbers? if arg.is_Number: - if arg.is_infinite: - return + if not arg.is_Integer: + raise ValueError("argument must be nonnegative integer or negative odd integer") # This implementation is faster than the recursive one # It also avoids "maximum recursion depth exceeded" runtime error @@ -339,13 +339,13 @@ def eval(cls, arg): if arg.is_even: k = arg / 2 return 2 ** k * factorial(k) - return factorial(arg) / factorial2(arg - 1) - if arg.is_even: - raise ValueError("argument must be nonnegative or odd") - return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg) + if arg.is_odd: + return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg) + raise ValueError("argument must be nonnegative integer or negative odd integer") + def _eval_is_even(self): # Double factorial is even for every positive even input
`factorial2` runs into `RunTimeError` for non-integer ``` >>> from sympy import factorial2, S >>> factorial2(S(5)/2) File "/home/gxyd/Public/sympy/sympy/core/function.py", line 200, in __new__ evaluated = cls.eval(*args) File "/home/gxyd/Public/sympy/sympy/functions/combinatorial/factorials.py", line 348, in eval return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg) File "/home/gxyd/Public/sympy/sympy/core/cache.py", line 93, in wrapper retval = cfunc(*args, **kwargs) File "/usr/lib/python3.4/functools.py", line 472, in wrapper result = user_function(*args, **kwds) File "/home/gxyd/Public/sympy/sympy/core/function.py", line 376, in __new__ result = super(Function, cls).__new__(cls, *args, **options) File "/home/gxyd/Public/sympy/sympy/core/cache.py", line 93, in wrapper retval = cfunc(*args, **kwargs) File "/usr/lib/python3.4/functools.py", line 472, in wrapper result = user_function(*args, **kwds) File "/home/gxyd/Public/sympy/sympy/core/function.py", line 200, in __new__ evaluated = cls.eval(*args) File "/home/gxyd/Public/sympy/sympy/functions/combinatorial/factorials.py", line 343, in eval return factorial(arg) / factorial2(arg - 1) File "/home/gxyd/Public/sympy/sympy/core/cache.py", line 93, in wrapper retval = cfunc(*args, **kwargs) File "/usr/lib/python3.4/functools.py", line 472, in wrapper result = user_function(*args, **kwds) File "/home/gxyd/Public/sympy/sympy/core/function.py", line 376, in __new__ result = super(Function, cls).__new__(cls, *args, **options) File "/home/gxyd/Public/sympy/sympy/core/cache.py", line 93, in wrapper retval = cfunc(*args, **kwargs) File "/usr/lib/python3.4/functools.py", line 472, in wrapper result = user_function(*args, **kwds) File "/home/gxyd/Public/sympy/sympy/core/function.py", line 200, in __new__ evaluated = cls.eval(*args) File "/home/gxyd/Public/sympy/sympy/functions/combinatorial/factorials.py", line 348, in eval return arg * (S.NegativeOne) ** ((1 - arg) / 2) / factorial2(-arg) File 
"/home/gxyd/Public/sympy/sympy/core/decorators.py", line 76, in __sympifyit_wrapper b = sympify(b, strict=True) File "/home/gxyd/Public/sympy/sympy/core/sympify.py", line 253, in sympify return converter[cls](a) File "/home/gxyd/Public/sympy/sympy/core/numbers.py", line 1666, in __new__ if isinstance(i, string_types): RuntimeError: maximum recursion depth exceeded while calling a Python object ```
sympy/sympy
diff --git a/sympy/functions/combinatorial/tests/test_comb_factorials.py b/sympy/functions/combinatorial/tests/test_comb_factorials.py index 9b54ea1019..1886a053ba 100644 --- a/sympy/functions/combinatorial/tests/test_comb_factorials.py +++ b/sympy/functions/combinatorial/tests/test_comb_factorials.py @@ -3,8 +3,9 @@ oo, zoo, simplify, expand_func, Product, I, Piecewise, Mod, Eq, sqrt) from sympy.functions.combinatorial.factorials import subfactorial from sympy.functions.special.gamma_functions import uppergamma -from sympy.utilities.pytest import XFAIL +from sympy.utilities.pytest import XFAIL, raises +#Solves and Fixes Issue #10388 - This is the updated test for the same solved issue def test_rf_eval_apply(): x, y = symbols('x,y') @@ -194,7 +195,9 @@ def test_factorial2(): nt = Symbol('nt', nonnegative=True) nf = Symbol('nf', nonnegative=False) nn = Symbol('nn') - + #Solves and Fixes Issue #10388 - This is the updated test for the same solved issue + raises (ValueError, lambda: factorial2(oo)) + raises (ValueError, lambda: factorial2(S(5)/2)) assert factorial2(n).is_integer is None assert factorial2(tt - 1).is_integer assert factorial2(tte - 1).is_integer
{ "commit_name": "head_commit", "failed_lite_validators": [], "has_test_patch": true, "is_lite": true, "llm_score": { "difficulty_score": 1, "issue_text_score": 0, "test_score": 0 }, "num_modified_files": 1 }
0.7
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .[dev]", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "mpmath>=0.19", "pytest", "pytest-cov", "pytest-xdist", "pytest-mock", "pytest-asyncio" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.7", "reqs_path": [ "requirements/base.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
certifi @ file:///croot/certifi_1671487769961/work/certifi coverage==7.2.7 exceptiongroup==1.2.2 execnet==2.0.2 importlib-metadata==6.7.0 iniconfig==2.0.0 mpmath==1.3.0 packaging==24.0 pluggy==1.2.0 pytest==7.4.4 pytest-asyncio==0.21.2 pytest-cov==4.1.0 pytest-mock==3.11.1 pytest-xdist==3.5.0 -e git+https://github.com/sympy/sympy.git@bdb181bbc640df4f1e27ebe42a385da5780f0fa2#egg=sympy tomli==2.0.1 typing_extensions==4.7.1 zipp==3.15.0
name: sympy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - certifi=2022.12.7=py37h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=1.1.1w=h7f8727e_0 - pip=22.3.1=py37h06a4308_0 - python=3.7.16=h7a1cb2a_0 - readline=8.2=h5eee18b_0 - setuptools=65.6.3=py37h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - wheel=0.38.4=py37h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - coverage==7.2.7 - exceptiongroup==1.2.2 - execnet==2.0.2 - importlib-metadata==6.7.0 - iniconfig==2.0.0 - mpmath==1.3.0 - packaging==24.0 - pluggy==1.2.0 - pytest==7.4.4 - pytest-asyncio==0.21.2 - pytest-cov==4.1.0 - pytest-mock==3.11.1 - pytest-xdist==3.5.0 - tomli==2.0.1 - typing-extensions==4.7.1 - zipp==3.15.0 prefix: /opt/conda/envs/sympy
[ "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial2" ]
[ "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial_simplify_fail" ]
[ "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_rf_eval_apply", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_ff_eval_apply", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial_diff", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial_series", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial_rewrite", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_factorial2_rewrite", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_binomial", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_binomial_diff", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_binomial_rewrite", "sympy/functions/combinatorial/tests/test_comb_factorials.py::test_subfactorial" ]
[]
BSD
398
scrapy__scrapy-1735
7d24df37380cd5a5b7394cd2534e240bd2eff0ca
2016-01-27 15:11:13
6aa85aee2a274393307ac3e777180fcbdbdc9848
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 95b4a7e3c..000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributor Code of Conduct - -As contributors and maintainers of this project, and in the interest of -fostering an open and welcoming community, we pledge to respect all people who -contribute through reporting issues, posting feature requests, updating -documentation, submitting pull requests or patches, and other activities. - -We are committed to making participation in this project a harassment-free -experience for everyone, regardless of level of experience, gender, gender -identity and expression, sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, such as physical or electronic - addresses, without explicit permission -* Other unethical or unprofessional conduct - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -By adopting this Code of Conduct, project maintainers commit themselves to -fairly and consistently applying these principles to every aspect of managing -this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
- -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting a project maintainer at [email protected]. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. Maintainers are -obligated to maintain confidentiality with regard to the reporter of an -incident. - - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.3.0, available at -[http://contributor-covenant.org/version/1/3/0/][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/README.rst b/README.rst index 3e050bb1e..6cbed75ee 100644 --- a/README.rst +++ b/README.rst @@ -73,12 +73,6 @@ See http://scrapy.org/community/ Contributing ============ -Please note that this project is released with a Contributor Code of Conduct -(see https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md). - -By participating in this project you agree to abide by its terms. -Please report unacceptable behavior to [email protected]. - See http://doc.scrapy.org/en/master/contributing.html Companies using Scrapy diff --git a/docs/topics/request-response.rst b/docs/topics/request-response.rst index 82e674cee..ea64d1599 100644 --- a/docs/topics/request-response.rst +++ b/docs/topics/request-response.rst @@ -445,10 +445,10 @@ Response objects .. attribute:: Response.body - The body of this Response. Keep in mind that Response.body - is always a bytes object. If you want the unicode version use - :attr:`TextResponse.text` (only available in :class:`TextResponse` - and subclasses). + A str containing the body of this Response. Keep in mind that Response.body + is always a str. If you want the unicode version use + :meth:`TextResponse.body_as_unicode` (only available in + :class:`TextResponse` and subclasses). This attribute is read-only. To change the body of a Response use :meth:`replace`. 
@@ -542,21 +542,6 @@ TextResponse objects :class:`TextResponse` objects support the following attributes in addition to the standard :class:`Response` ones: - .. attribute:: TextResponse.text - - Response body, as unicode. - - The same as ``response.body.decode(response.encoding)``, but the - result is cached after the first call, so you can access - ``response.text`` multiple times without extra overhead. - - .. note:: - - ``unicode(response.body)`` is not a correct way to convert response - body to unicode: you would be using the system default encoding - (typically `ascii`) instead of the response encoding. - - .. attribute:: TextResponse.encoding A string with the encoding of this response. The encoding is resolved by @@ -583,6 +568,20 @@ TextResponse objects :class:`TextResponse` objects support the following methods in addition to the standard :class:`Response` ones: + .. method:: TextResponse.body_as_unicode() + + Returns the body of the response as unicode. This is equivalent to:: + + response.body.decode(response.encoding) + + But **not** equivalent to:: + + unicode(response.body) + + Since, in the latter case, you would be using the system default encoding + (typically `ascii`) to convert the body to unicode, instead of the response + encoding. + .. method:: TextResponse.xpath(query) A shortcut to ``TextResponse.selector.xpath(query)``:: @@ -595,11 +594,6 @@ TextResponse objects response.css('p') - .. method:: TextResponse.body_as_unicode() - - The same as :attr:`text`, but available as a method. This method is - kept for backwards compatibility; please prefer ``response.text``. 
- HtmlResponse objects -------------------- diff --git a/scrapy/downloadermiddlewares/ajaxcrawl.py b/scrapy/downloadermiddlewares/ajaxcrawl.py index da373eca2..6b543b823 100644 --- a/scrapy/downloadermiddlewares/ajaxcrawl.py +++ b/scrapy/downloadermiddlewares/ajaxcrawl.py @@ -63,7 +63,7 @@ class AjaxCrawlMiddleware(object): Return True if a page without hash fragment could be "AJAX crawlable" according to https://developers.google.com/webmasters/ajax-crawling/docs/getting-started. """ - body = response.text[:self.lookup_bytes] + body = response.body_as_unicode()[:self.lookup_bytes] return _has_ajaxcrawlable_meta(body) diff --git a/scrapy/downloadermiddlewares/robotstxt.py b/scrapy/downloadermiddlewares/robotstxt.py index d4a33dc36..6fdba90cc 100644 --- a/scrapy/downloadermiddlewares/robotstxt.py +++ b/scrapy/downloadermiddlewares/robotstxt.py @@ -83,8 +83,8 @@ class RobotsTxtMiddleware(object): def _parse_robots(self, response, netloc): rp = robotparser.RobotFileParser(response.url) body = '' - if hasattr(response, 'text'): - body = response.text + if hasattr(response, 'body_as_unicode'): + body = response.body_as_unicode() else: # last effort try try: body = response.body.decode('utf-8') @@ -101,4 +101,6 @@ class RobotsTxtMiddleware(object): rp_dfd.callback(rp) def _robots_error(self, failure, netloc): - self._parsers.pop(netloc).callback(None) + rp_dfd = self._parsers[netloc] + self._parsers[netloc] = None + rp_dfd.callback(None) diff --git a/scrapy/http/request/form.py b/scrapy/http/request/form.py index 2862dc096..5501634d3 100644 --- a/scrapy/http/request/form.py +++ b/scrapy/http/request/form.py @@ -64,8 +64,8 @@ def _urlencode(seq, enc): def _get_form(response, formname, formid, formnumber, formxpath): """Find the form element """ - root = create_root_node(response.text, lxml.html.HTMLParser, - base_url=get_base_url(response)) + text = response.body_as_unicode() + root = create_root_node(text, lxml.html.HTMLParser, base_url=get_base_url(response)) forms = 
root.xpath('//form') if not forms: raise ValueError("No <form> element found in %s" % response) diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py index 9c667ab7e..1c416bf82 100644 --- a/scrapy/http/response/text.py +++ b/scrapy/http/response/text.py @@ -67,11 +67,6 @@ class TextResponse(Response): self._cached_ubody = html_to_unicode(charset, self.body)[1] return self._cached_ubody - @property - def text(self): - """ Body as unicode """ - return self.body_as_unicode() - def urljoin(self, url): """Join this Response's url with a possible relative url to form an absolute interpretation of the latter.""" diff --git a/scrapy/selector/unified.py b/scrapy/selector/unified.py index 15f3d26df..5d77f7624 100644 --- a/scrapy/selector/unified.py +++ b/scrapy/selector/unified.py @@ -60,7 +60,7 @@ class Selector(_ParselSelector, object_ref): response = _response_from_text(text, st) if response is not None: - text = response.text + text = response.body_as_unicode() kwargs.setdefault('base_url', response.url) self.response = response diff --git a/scrapy/utils/iterators.py b/scrapy/utils/iterators.py index 73857b410..b0688791e 100644 --- a/scrapy/utils/iterators.py +++ b/scrapy/utils/iterators.py @@ -137,7 +137,7 @@ def _body_or_str(obj, unicode=True): if not unicode: return obj.body elif isinstance(obj, TextResponse): - return obj.text + return obj.body_as_unicode() else: return obj.body.decode('utf-8') elif isinstance(obj, six.text_type): diff --git a/scrapy/utils/response.py b/scrapy/utils/response.py index 73db2641e..c4ad52f14 100644 --- a/scrapy/utils/response.py +++ b/scrapy/utils/response.py @@ -25,7 +25,7 @@ _baseurl_cache = weakref.WeakKeyDictionary() def get_base_url(response): """Return the base url of the given response, joined with the response url""" if response not in _baseurl_cache: - text = response.text[0:4096] + text = response.body_as_unicode()[0:4096] _baseurl_cache[response] = html.get_base_url(text, response.url, response.encoding) 
return _baseurl_cache[response] @@ -37,7 +37,7 @@ _metaref_cache = weakref.WeakKeyDictionary() def get_meta_refresh(response): """Parse the http-equiv refrsh parameter from the given response""" if response not in _metaref_cache: - text = response.text[0:4096] + text = response.body_as_unicode()[0:4096] text = _noscript_re.sub(u'', text) text = _script_re.sub(u'', text) _metaref_cache[response] = html.get_meta_refresh(text, response.url,
KeyError in robotstxt middleware I'm getting these errors in robots.txt middleware: ``` 2016-01-27 16:18:21 [scrapy.core.scraper] ERROR: Error downloading <GET http://yellowpages.co.th> Traceback (most recent call last): File "/Users/kmike/envs/scraping/lib/python2.7/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred result = f(*args, **kw) File "/Users/kmike/svn/scrapy/scrapy/downloadermiddlewares/robotstxt.py", line 65, in robot_parser if isinstance(self._parsers[netloc], Deferred): KeyError: 'yellowpages.co.th' ``` It looks like https://github.com/scrapy/scrapy/pull/1473 caused it (I can't get this issue in Scrapy 1.0.4, but it present in Scrapy master). It happens when page failed to download and HTTP cache is enabled. I haven't debugged it further.
scrapy/scrapy
diff --git a/tests/test_downloadermiddleware_robotstxt.py b/tests/test_downloadermiddleware_robotstxt.py index 5f45dcb82..f2e94e171 100644 --- a/tests/test_downloadermiddleware_robotstxt.py +++ b/tests/test_downloadermiddleware_robotstxt.py @@ -123,6 +123,18 @@ class RobotsTxtMiddlewareTest(unittest.TestCase): deferred.addCallback(lambda _: self.assertTrue(middleware._logerror.called)) return deferred + def test_robotstxt_immediate_error(self): + self.crawler.settings.set('ROBOTSTXT_OBEY', True) + err = error.DNSLookupError('Robotstxt address not found') + def immediate_failure(request, spider): + deferred = Deferred() + deferred.errback(failure.Failure(err)) + return deferred + self.crawler.engine.download.side_effect = immediate_failure + + middleware = RobotsTxtMiddleware(self.crawler) + return self.assertNotIgnored(Request('http://site.local'), middleware) + def test_ignore_robotstxt_request(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) def ignore_request(request, spider): diff --git a/tests/test_engine.py b/tests/test_engine.py index baf6ef1bf..9f2c02bff 100644 --- a/tests/test_engine.py +++ b/tests/test_engine.py @@ -55,11 +55,12 @@ class TestSpider(Spider): def parse_item(self, response): item = self.item_cls() - m = self.name_re.search(response.text) + body = response.body_as_unicode() + m = self.name_re.search(body) if m: item['name'] = m.group(1) item['url'] = response.url - m = self.price_re.search(response.text) + m = self.price_re.search(body) if m: item['price'] = m.group(1) return item diff --git a/tests/test_http_response.py b/tests/test_http_response.py index c7f36687a..710a5b29d 100644 --- a/tests/test_http_response.py +++ b/tests/test_http_response.py @@ -107,11 +107,9 @@ class BaseResponseTest(unittest.TestCase): body_bytes = body assert isinstance(response.body, bytes) - assert isinstance(response.text, six.text_type) self._assert_response_encoding(response, encoding) self.assertEqual(response.body, body_bytes) 
self.assertEqual(response.body_as_unicode(), body_unicode) - self.assertEqual(response.text, body_unicode) def _assert_response_encoding(self, response, encoding): self.assertEqual(response.encoding, resolve_encoding(encoding)) @@ -173,10 +171,6 @@ class TextResponseTest(BaseResponseTest): self.assertTrue(isinstance(r1.body_as_unicode(), six.text_type)) self.assertEqual(r1.body_as_unicode(), unicode_string) - # check response.text - self.assertTrue(isinstance(r1.text, six.text_type)) - self.assertEqual(r1.text, unicode_string) - def test_encoding(self): r1 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=utf-8"]}, body=b"\xc2\xa3") r2 = self.response_class("http://www.example.com", encoding='utf-8', body=u"\xa3") @@ -225,12 +219,12 @@ class TextResponseTest(BaseResponseTest): headers={"Content-type": ["text/html; charset=utf-8"]}, body=b"\xef\xbb\xbfWORD\xe3\xab") self.assertEqual(r6.encoding, 'utf-8') - self.assertEqual(r6.text, u'WORD\ufffd\ufffd') + self.assertEqual(r6.body_as_unicode(), u'WORD\ufffd\ufffd') def test_bom_is_removed_from_body(self): # Inferring encoding from body also cache decoded body as sideeffect, # this test tries to ensure that calling response.encoding and - # response.text in indistint order doesn't affect final + # response.body_as_unicode() in indistint order doesn't affect final # values for encoding and decoded body. 
url = 'http://example.com' body = b"\xef\xbb\xbfWORD" @@ -239,9 +233,9 @@ class TextResponseTest(BaseResponseTest): # Test response without content-type and BOM encoding response = self.response_class(url, body=body) self.assertEqual(response.encoding, 'utf-8') - self.assertEqual(response.text, u'WORD') + self.assertEqual(response.body_as_unicode(), u'WORD') response = self.response_class(url, body=body) - self.assertEqual(response.text, u'WORD') + self.assertEqual(response.body_as_unicode(), u'WORD') self.assertEqual(response.encoding, 'utf-8') # Body caching sideeffect isn't triggered when encoding is declared in @@ -249,9 +243,9 @@ class TextResponseTest(BaseResponseTest): # body response = self.response_class(url, headers=headers, body=body) self.assertEqual(response.encoding, 'utf-8') - self.assertEqual(response.text, u'WORD') + self.assertEqual(response.body_as_unicode(), u'WORD') response = self.response_class(url, headers=headers, body=body) - self.assertEqual(response.text, u'WORD') + self.assertEqual(response.body_as_unicode(), u'WORD') self.assertEqual(response.encoding, 'utf-8') def test_replace_wrong_encoding(self): @@ -259,18 +253,18 @@ class TextResponseTest(BaseResponseTest): r = self.response_class("http://www.example.com", encoding='utf-8', body=b'PREFIX\xe3\xabSUFFIX') # XXX: Policy for replacing invalid chars may suffer minor variations # but it should always contain the unicode replacement char (u'\ufffd') - assert u'\ufffd' in r.text, repr(r.text) - assert u'PREFIX' in r.text, repr(r.text) - assert u'SUFFIX' in r.text, repr(r.text) + assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode()) + assert u'PREFIX' in r.body_as_unicode(), repr(r.body_as_unicode()) + assert u'SUFFIX' in r.body_as_unicode(), repr(r.body_as_unicode()) # Do not destroy html tags due to encoding bugs r = self.response_class("http://example.com", encoding='utf-8', \ body=b'\xf0<span>value</span>') - assert u'<span>value</span>' in r.text, repr(r.text) + assert 
u'<span>value</span>' in r.body_as_unicode(), repr(r.body_as_unicode()) # FIXME: This test should pass once we stop using BeautifulSoup's UnicodeDammit in TextResponse - #r = self.response_class("http://www.example.com", body=b'PREFIX\xe3\xabSUFFIX') - #assert u'\ufffd' in r.text, repr(r.text) + #r = self.response_class("http://www.example.com", body='PREFIX\xe3\xabSUFFIX') + #assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode()) def test_selector(self): body = b"<html><head><title>Some page</title><body></body></html>"
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_hyperlinks", "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 1, "issue_text_score": 1, "test_score": 2 }, "num_modified_files": 9 }
1.0
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": [ "apt-get update", "apt-get install -y gcc" ], "python": "3.9", "reqs_path": [ "requirements.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
attrs==25.3.0 Automat==24.8.1 cffi==1.17.1 constantly==23.10.4 cryptography==44.0.2 cssselect==1.3.0 exceptiongroup==1.2.2 hyperlink==21.0.0 idna==3.10 incremental==24.7.2 iniconfig==2.1.0 jmespath==1.0.1 lxml==5.3.1 packaging==24.2 parsel==1.10.0 pluggy==1.5.0 pyasn1==0.6.1 pyasn1_modules==0.4.2 pycparser==2.22 PyDispatcher==2.0.7 pyOpenSSL==25.0.0 pytest==8.3.5 queuelib==1.7.0 -e git+https://github.com/scrapy/scrapy.git@7d24df37380cd5a5b7394cd2534e240bd2eff0ca#egg=Scrapy service-identity==24.2.0 six==1.17.0 tomli==2.2.1 Twisted==24.11.0 typing_extensions==4.13.0 w3lib==2.3.1 zope.interface==7.2
name: scrapy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - attrs==25.3.0 - automat==24.8.1 - cffi==1.17.1 - constantly==23.10.4 - cryptography==44.0.2 - cssselect==1.3.0 - exceptiongroup==1.2.2 - hyperlink==21.0.0 - idna==3.10 - incremental==24.7.2 - iniconfig==2.1.0 - jmespath==1.0.1 - lxml==5.3.1 - packaging==24.2 - parsel==1.10.0 - pluggy==1.5.0 - pyasn1==0.6.1 - pyasn1-modules==0.4.2 - pycparser==2.22 - pydispatcher==2.0.7 - pyopenssl==25.0.0 - pytest==8.3.5 - queuelib==1.7.0 - service-identity==24.2.0 - six==1.17.0 - tomli==2.2.1 - twisted==24.11.0 - typing-extensions==4.13.0 - w3lib==2.3.1 - zope-interface==7.2 prefix: /opt/conda/envs/scrapy
[ "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_immediate_error" ]
[ "tests/test_engine.py::EngineTest::test_crawler", "tests/test_http_response.py::TextResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM", "tests/test_http_response.py::TextResponseTest::test_selector", "tests/test_http_response.py::TextResponseTest::test_selector_shortcuts", "tests/test_http_response.py::HtmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM", "tests/test_http_response.py::HtmlResponseTest::test_selector", "tests/test_http_response.py::HtmlResponseTest::test_selector_shortcuts", "tests/test_http_response.py::XmlResponseTest::test_invalid_utf8_encoded_body_with_valid_utf8_BOM", "tests/test_http_response.py::XmlResponseTest::test_selector", "tests/test_http_response.py::XmlResponseTest::test_selector_shortcuts" ]
[ "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_ignore_robotstxt_request", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_empty_response", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_error", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_garbage", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_meta", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_ready_parser", "tests/test_downloadermiddleware_robotstxt.py::RobotsTxtMiddlewareTest::test_robotstxt_settings", "tests/test_engine.py::EngineTest::test_close_downloader", "tests/test_engine.py::EngineTest::test_close_engine_spiders_downloader", "tests/test_engine.py::EngineTest::test_close_spiders_downloader", "tests/test_http_response.py::BaseResponseTest::test_copy", "tests/test_http_response.py::BaseResponseTest::test_copy_inherited_classes", "tests/test_http_response.py::BaseResponseTest::test_copy_meta", "tests/test_http_response.py::BaseResponseTest::test_immutable_attributes", "tests/test_http_response.py::BaseResponseTest::test_init", "tests/test_http_response.py::BaseResponseTest::test_replace", "tests/test_http_response.py::BaseResponseTest::test_urljoin", "tests/test_http_response.py::TextResponseTest::test_bom_is_removed_from_body", "tests/test_http_response.py::TextResponseTest::test_copy", "tests/test_http_response.py::TextResponseTest::test_copy_inherited_classes", "tests/test_http_response.py::TextResponseTest::test_copy_meta", "tests/test_http_response.py::TextResponseTest::test_declared_encoding_invalid", "tests/test_http_response.py::TextResponseTest::test_encoding", "tests/test_http_response.py::TextResponseTest::test_immutable_attributes", 
"tests/test_http_response.py::TextResponseTest::test_init", "tests/test_http_response.py::TextResponseTest::test_replace", "tests/test_http_response.py::TextResponseTest::test_replace_wrong_encoding", "tests/test_http_response.py::TextResponseTest::test_unicode_body", "tests/test_http_response.py::TextResponseTest::test_unicode_url", "tests/test_http_response.py::TextResponseTest::test_urljoin", "tests/test_http_response.py::TextResponseTest::test_urljoin_with_base_url", "tests/test_http_response.py::TextResponseTest::test_utf16", "tests/test_http_response.py::HtmlResponseTest::test_bom_is_removed_from_body", "tests/test_http_response.py::HtmlResponseTest::test_copy", "tests/test_http_response.py::HtmlResponseTest::test_copy_inherited_classes", "tests/test_http_response.py::HtmlResponseTest::test_copy_meta", "tests/test_http_response.py::HtmlResponseTest::test_declared_encoding_invalid", "tests/test_http_response.py::HtmlResponseTest::test_encoding", "tests/test_http_response.py::HtmlResponseTest::test_html5_meta_charset", "tests/test_http_response.py::HtmlResponseTest::test_html_encoding", "tests/test_http_response.py::HtmlResponseTest::test_immutable_attributes", "tests/test_http_response.py::HtmlResponseTest::test_init", "tests/test_http_response.py::HtmlResponseTest::test_replace", "tests/test_http_response.py::HtmlResponseTest::test_replace_wrong_encoding", "tests/test_http_response.py::HtmlResponseTest::test_unicode_body", "tests/test_http_response.py::HtmlResponseTest::test_unicode_url", "tests/test_http_response.py::HtmlResponseTest::test_urljoin", "tests/test_http_response.py::HtmlResponseTest::test_urljoin_with_base_url", "tests/test_http_response.py::HtmlResponseTest::test_utf16", "tests/test_http_response.py::XmlResponseTest::test_bom_is_removed_from_body", "tests/test_http_response.py::XmlResponseTest::test_copy", "tests/test_http_response.py::XmlResponseTest::test_copy_inherited_classes", "tests/test_http_response.py::XmlResponseTest::test_copy_meta", 
"tests/test_http_response.py::XmlResponseTest::test_declared_encoding_invalid", "tests/test_http_response.py::XmlResponseTest::test_encoding", "tests/test_http_response.py::XmlResponseTest::test_immutable_attributes", "tests/test_http_response.py::XmlResponseTest::test_init", "tests/test_http_response.py::XmlResponseTest::test_replace", "tests/test_http_response.py::XmlResponseTest::test_replace_encoding", "tests/test_http_response.py::XmlResponseTest::test_replace_wrong_encoding", "tests/test_http_response.py::XmlResponseTest::test_unicode_body", "tests/test_http_response.py::XmlResponseTest::test_unicode_url", "tests/test_http_response.py::XmlResponseTest::test_urljoin", "tests/test_http_response.py::XmlResponseTest::test_urljoin_with_base_url", "tests/test_http_response.py::XmlResponseTest::test_utf16", "tests/test_http_response.py::XmlResponseTest::test_xml_encoding" ]
[]
BSD 3-Clause "New" or "Revised" License
399
abh1nav__gnippy-16
f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0
2016-01-28 04:30:48
f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0
diff --git a/gnippy/rules.py b/gnippy/rules.py index 0309b5d..f5f1d04 100644 --- a/gnippy/rules.py +++ b/gnippy/rules.py @@ -2,6 +2,11 @@ import json +try: + from urllib.parse import urlparse +except: + from urlparse import urlparse + import requests from six import string_types @@ -82,6 +87,17 @@ def _post(conf, built_rules): error_text = "HTTP Response Code: %s, Text: '%s'" % (str(r.status_code), r.text) raise RuleAddFailedException(error_text) +def _generate_delete_url(conf): + """ + Generate the Rules URL for a DELETE request. + """ + generated_url = _generate_rules_url(conf['url']) + parsed_url = urlparse(generated_url) + query = parsed_url.query + if query != '': + return generated_url.replace(query, query + "&_method=delete") + else: + return generated_url + "?_method=delete" def _delete(conf, built_rules): """ @@ -99,7 +115,7 @@ def _delete(conf, built_rules): built_rules: A single or list of built rules. """ _check_rules_list(built_rules) - rules_url = _generate_rules_url(conf['url']) + "?_method=delete" + rules_url = _generate_delete_url(conf) delete_data = json.dumps(_generate_post_object(built_rules)) r = requests.post(rules_url, auth=conf['auth'], data=delete_data) if not r.status_code in range(200,300): diff --git a/setup.py b/setup.py index 42ddcbc..d25cd66 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ import os import sys -version = "0.5.0" +version = "0.5.1" try: from setuptools import setup
Rules URL is incorrectly generated if endpoint URL has parameters This does not work if `conf['url']` has params. ``` rules_url = _generate_rules_url(conf['url']) + "?_method=delete" ``` Example generated URL: ``` https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?client=2?_method=delete ```
abh1nav/gnippy
diff --git a/gnippy/test/test_rules.py b/gnippy/test/test_rules.py index 4310376..a79a857 100644 --- a/gnippy/test/test_rules.py +++ b/gnippy/test/test_rules.py @@ -258,6 +258,20 @@ class RulesTestCase(unittest.TestCase): r = rules.get_rules(config_file_path=test_utils.test_config_path) self.assertEqual(1, len(r)) + def test_generate_delete_url_normal_case(self): + """ Check if the Delete URL is generated correctly. """ + conf = { 'url': 'https://stream.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod.json' } + url = rules._generate_delete_url(conf) + self.assertEqual('https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?_method=delete', + url) + + def test_generate_delete_url_with_query(self): + """ Account for https://github.com/abh1nav/gnippy/issues/15 """ + conf = { 'url': 'https://stream.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod.json?client=2' } + url = rules._generate_delete_url(conf) + self.assertEqual('https://api.gnip.com:443/accounts/XXX/publishers/twitter/streams/track/prod/rules.json?client=2&_method=delete', + url) + @mock.patch('requests.post', good_delete) def test_delete_rules_single(self): """ Delete one rule. """
{ "commit_name": "head_commit", "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false, "llm_score": { "difficulty_score": 2, "issue_text_score": 1, "test_score": 1 }, "num_modified_files": 2 }
0.5
{ "env_vars": null, "env_yml_path": null, "install": "pip install -e .", "log_parser": "parse_log_pytest", "no_use_env": null, "packages": "requirements.txt", "pip_packages": [ "pytest" ], "pre_install": null, "python": "3.9", "reqs_path": [ "requirements-dev.txt" ], "test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning" }
cachetools==5.5.2 chardet==5.2.0 colorama==0.4.6 distlib==0.3.9 exceptiongroup==1.2.2 filelock==3.18.0 -e git+https://github.com/abh1nav/gnippy.git@f9fc42bf37cb9f415df18fd7f72a238d1cbd69e0#egg=gnippy iniconfig==2.1.0 mock==1.0.1 nose==1.3.0 packaging==24.2 platformdirs==4.3.7 pluggy==1.5.0 pyproject-api==1.9.0 pytest==8.3.5 requests==2.8.1 six==1.10.0 tomli==2.2.1 tox==4.25.0 typing_extensions==4.13.0 virtualenv==20.29.3
name: gnippy channels: - defaults - https://repo.anaconda.com/pkgs/main - https://repo.anaconda.com/pkgs/r - conda-forge dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2025.2.25=h06a4308_0 - ld_impl_linux-64=2.40=h12ee557_0 - libffi=3.4.4=h6a678d5_1 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.16=h5eee18b_0 - pip=25.0=py39h06a4308_0 - python=3.9.21=he870216_1 - readline=8.2=h5eee18b_0 - setuptools=75.8.0=py39h06a4308_0 - sqlite=3.45.3=h5eee18b_0 - tk=8.6.14=h39e8969_0 - tzdata=2025a=h04d1e81_0 - wheel=0.45.1=py39h06a4308_0 - xz=5.6.4=h5eee18b_1 - zlib=1.2.13=h5eee18b_1 - pip: - cachetools==5.5.2 - chardet==5.2.0 - colorama==0.4.6 - distlib==0.3.9 - exceptiongroup==1.2.2 - filelock==3.18.0 - iniconfig==2.1.0 - mock==1.0.1 - nose==1.3.0 - packaging==24.2 - platformdirs==4.3.7 - pluggy==1.5.0 - pyproject-api==1.9.0 - pytest==8.3.5 - requests==2.8.1 - six==1.10.0 - tomli==2.2.1 - tox==4.25.0 - typing-extensions==4.13.0 - virtualenv==20.29.3 prefix: /opt/conda/envs/gnippy
[ "gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_normal_case", "gnippy/test/test_rules.py::RulesTestCase::test_generate_delete_url_with_query" ]
[]
[ "gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_no_creds", "gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_not_ok", "gnippy/test/test_rules.py::RulesTestCase::test_add_many_rules_ok", "gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_no_creds", "gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_not_ok", "gnippy/test/test_rules.py::RulesTestCase::test_add_one_rule_ok", "gnippy/test/test_rules.py::RulesTestCase::test_build_post_object", "gnippy/test/test_rules.py::RulesTestCase::test_build_rule_bad_args", "gnippy/test/test_rules.py::RulesTestCase::test_build_rule_with_tag", "gnippy/test/test_rules.py::RulesTestCase::test_build_rule_without_tag", "gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url", "gnippy/test/test_rules.py::RulesTestCase::test_build_rules_url_bad", "gnippy/test/test_rules.py::RulesTestCase::test_check_many_rules_ok", "gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_extra_stuff_in_rule", "gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_ok", "gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_tag", "gnippy/test/test_rules.py::RulesTestCase::test_check_one_rule_typo_values", "gnippy/test/test_rules.py::RulesTestCase::test_check_rule_tag_none", "gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_multiple", "gnippy/test/test_rules.py::RulesTestCase::test_delete_rules_single", "gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_json", "gnippy/test/test_rules.py::RulesTestCase::test_get_rules_bad_status_code", "gnippy/test/test_rules.py::RulesTestCase::test_get_rules_no_rules_field_json", "gnippy/test/test_rules.py::RulesTestCase::test_get_rules_requests_get_exception", "gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_no_rules", "gnippy/test/test_rules.py::RulesTestCase::test_get_rules_success_one_rule" ]
[]
Apache License 2.0
400